[clang] e247fed - [RISCV] add Half-precision test for clang

Shao-Ce SUN via cfe-commits cfe-commits at lists.llvm.org
Tue Sep 21 20:07:11 PDT 2021


Author: Shao-Ce SUN
Date: 2021-09-22T11:06:57+08:00
New Revision: e247fed23b9220e59ec5438d4c2013c0bf7f3fe9

URL: https://github.com/llvm/llvm-project/commit/e247fed23b9220e59ec5438d4c2013c0bf7f3fe9
DIFF: https://github.com/llvm/llvm-project/commit/e247fed23b9220e59ec5438d4c2013c0bf7f3fe9.diff

LOG: [RISCV] add Half-precision test for clang

and delete unnecessary lines.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D109799
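
For reference, the updated RUN lines add -target-feature +experimental-zfh so that _Float16 and
the f16 vector types are usable in these tests. A minimal standalone sketch of the same pattern
(illustrative only; the file name half_abs.c is hypothetical, the intrinsic vfabs_v_f16m1 is one
of those exercised below, and the command mirrors the %clang_cc1 RUN line on a riscv64 clang
build with the experimental V and Zfh features):

    // half_abs.c -- compile with (mirroring the RUN line):
    //   clang -cc1 -triple riscv64 -target-feature +f -target-feature +d \
    //     -target-feature +experimental-v -target-feature +experimental-zfh \
    //     -disable-O0-optnone -emit-llvm half_abs.c -o -
    #include <riscv_vector.h>

    // vfabs on f16 vectors lowers to the @llvm.riscv.vfsgnjx intrinsic,
    // as the CHECK lines added to vfabs.c below verify.
    vfloat16m1_t half_abs(vfloat16m1_t op1, size_t vl) {
      return vfabs_v_f16m1(op1, vl);
    }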

Added: 
    

Modified: 
    clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredosum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vlsseg.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vreinterpret.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxei.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxseg.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsse.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsseg.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vssseg.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxei.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxseg.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vundefined.c

Removed: 
    


################################################################################
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c
index b13a18bac98f..9277d8d6feae 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -481,3 +482,57 @@ vfloat64m4_t test_vcompress_vm_f64m4 (vbool16_t mask, vfloat64m4_t dest, vfloat6
 vfloat64m8_t test_vcompress_vm_f64m8 (vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t vl) {
   return vcompress_vm_f64m8(mask, dest, src, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vcompress_vm_f16mf4 (vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t vl) {
+  return vcompress_vm_f16mf4(mask, dest, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vcompress_vm_f16mf2 (vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t vl) {
+  return vcompress_vm_f16mf2(mask, dest, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vcompress_vm_f16m1 (vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t vl) {
+  return vcompress_vm_f16m1(mask, dest, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vcompress_vm_f16m2 (vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t vl) {
+  return vcompress_vm_f16m2(mask, dest, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vcompress_vm_f16m4 (vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t vl) {
+  return vcompress_vm_f16m4(mask, dest, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vcompress_vm_f16m8 (vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t vl) {
+  return vcompress_vm_f16m8(mask, dest, src, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c
index 4fa3b2a73f5e..486b4c8661c7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -167,3 +168,110 @@ vfloat64m8_t test_vfabs_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat
   return vfabs_v_f64m8_m(mask, maskedoff, op1, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vfabs_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfabs_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
+  return vfabs_v_f16mf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfabs_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
+  return vfabs_v_f16mf2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfabs_v_f16m1 (vfloat16m1_t op1, size_t vl) {
+  return vfabs_v_f16m1(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfabs_v_f16m2 (vfloat16m2_t op1, size_t vl) {
+  return vfabs_v_f16m2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfabs_v_f16m4 (vfloat16m4_t op1, size_t vl) {
+  return vfabs_v_f16m4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfabs_v_f16m8 (vfloat16m8_t op1, size_t vl) {
+  return vfabs_v_f16m8(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfabs_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
+  return vfabs_v_f16mf4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfabs_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
+  return vfabs_v_f16mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfabs_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
+  return vfabs_v_f16m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfabs_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
+  return vfabs_v_f16m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfabs_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
+  return vfabs_v_f16m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfabs_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
+  return vfabs_v_f16m8_m(mask, maskedoff, op1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c
index bcde19568b84..41152fb7aa10 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -175,3 +176,111 @@ vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
                                    vfloat64m8_t op1, size_t vl) {
   return vfclass_v_u64m8_m(mask, maskedoff, op1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfclass_v_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_u16mf4 (vfloat16mf4_t op1, size_t vl) {
+  return vfclass_v_u16mf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfclass_v_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_u16mf2 (vfloat16mf2_t op1, size_t vl) {
+  return vfclass_v_u16mf2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfclass_v_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_u16m1 (vfloat16m1_t op1, size_t vl) {
+  return vfclass_v_u16m1(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfclass_v_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_u16m2 (vfloat16m2_t op1, size_t vl) {
+  return vfclass_v_u16m2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfclass_v_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_u16m4 (vfloat16m4_t op1, size_t vl) {
+  return vfclass_v_u16m4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfclass_v_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_u16m8 (vfloat16m8_t op1, size_t vl) {
+  return vfclass_v_u16m8(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfclass_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1f16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
+  return vfclass_v_u16mf4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfclass_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2f16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
+  return vfclass_v_u16mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfclass_v_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4f16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
+  return vfclass_v_u16m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfclass_v_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8f16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
+  return vfclass_v_u16m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfclass_v_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16f16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
+  return vfclass_v_u16m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfclass_v_u16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32f16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
+  return vfclass_v_u16m8_m(mask, maskedoff, op1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c
index 55a4a64d0a5d..ba84148a0a99 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -1032,3 +1033,651 @@ vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                        vuint64m8_t src, size_t vl) {
   return vfcvt_f_xu_v_f64m8_m(mask, maskedoff, src, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vfcvt_x_f_v_i16mf4 (vfloat16mf4_t src, size_t vl) {
+  return vfcvt_x_f_v_i16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4 (vfloat16mf4_t src, size_t vl) {
+  return vfcvt_rtz_x_f_v_i16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vfcvt_x_f_v_i16mf2 (vfloat16mf2_t src, size_t vl) {
+  return vfcvt_x_f_v_i16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2 (vfloat16mf2_t src, size_t vl) {
+  return vfcvt_rtz_x_f_v_i16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vfcvt_x_f_v_i16m1 (vfloat16m1_t src, size_t vl) {
+  return vfcvt_x_f_v_i16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vfcvt_rtz_x_f_v_i16m1 (vfloat16m1_t src, size_t vl) {
+  return vfcvt_rtz_x_f_v_i16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vfcvt_x_f_v_i16m2 (vfloat16m2_t src, size_t vl) {
+  return vfcvt_x_f_v_i16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vfcvt_rtz_x_f_v_i16m2 (vfloat16m2_t src, size_t vl) {
+  return vfcvt_rtz_x_f_v_i16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vfcvt_x_f_v_i16m4 (vfloat16m4_t src, size_t vl) {
+  return vfcvt_x_f_v_i16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vfcvt_rtz_x_f_v_i16m4 (vfloat16m4_t src, size_t vl) {
+  return vfcvt_rtz_x_f_v_i16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vfcvt_x_f_v_i16m8 (vfloat16m8_t src, size_t vl) {
+  return vfcvt_x_f_v_i16m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vfcvt_rtz_x_f_v_i16m8 (vfloat16m8_t src, size_t vl) {
+  return vfcvt_rtz_x_f_v_i16m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfcvt_xu_f_v_u16mf4 (vfloat16mf4_t src, size_t vl) {
+  return vfcvt_xu_f_v_u16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4 (vfloat16mf4_t src, size_t vl) {
+  return vfcvt_rtz_xu_f_v_u16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfcvt_xu_f_v_u16mf2 (vfloat16mf2_t src, size_t vl) {
+  return vfcvt_xu_f_v_u16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2 (vfloat16mf2_t src, size_t vl) {
+  return vfcvt_rtz_xu_f_v_u16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfcvt_xu_f_v_u16m1 (vfloat16m1_t src, size_t vl) {
+  return vfcvt_xu_f_v_u16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1 (vfloat16m1_t src, size_t vl) {
+  return vfcvt_rtz_xu_f_v_u16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfcvt_xu_f_v_u16m2 (vfloat16m2_t src, size_t vl) {
+  return vfcvt_xu_f_v_u16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2 (vfloat16m2_t src, size_t vl) {
+  return vfcvt_rtz_xu_f_v_u16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfcvt_xu_f_v_u16m4 (vfloat16m4_t src, size_t vl) {
+  return vfcvt_xu_f_v_u16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4 (vfloat16m4_t src, size_t vl) {
+  return vfcvt_rtz_xu_f_v_u16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfcvt_xu_f_v_u16m8 (vfloat16m8_t src, size_t vl) {
+  return vfcvt_xu_f_v_u16m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8 (vfloat16m8_t src, size_t vl) {
+  return vfcvt_rtz_xu_f_v_u16m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfcvt_f_x_v_f16mf4 (vint16mf4_t src, size_t vl) {
+  return vfcvt_f_x_v_f16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfcvt_f_x_v_f16mf2 (vint16mf2_t src, size_t vl) {
+  return vfcvt_f_x_v_f16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfcvt_f_x_v_f16m1 (vint16m1_t src, size_t vl) {
+  return vfcvt_f_x_v_f16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfcvt_f_x_v_f16m2 (vint16m2_t src, size_t vl) {
+  return vfcvt_f_x_v_f16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfcvt_f_x_v_f16m4 (vint16m4_t src, size_t vl) {
+  return vfcvt_f_x_v_f16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfcvt_f_x_v_f16m8 (vint16m8_t src, size_t vl) {
+  return vfcvt_f_x_v_f16m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4 (vuint16mf4_t src, size_t vl) {
+  return vfcvt_f_xu_v_f16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2 (vuint16mf2_t src, size_t vl) {
+  return vfcvt_f_xu_v_f16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfcvt_f_xu_v_f16m1 (vuint16m1_t src, size_t vl) {
+  return vfcvt_f_xu_v_f16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfcvt_f_xu_v_f16m2 (vuint16m2_t src, size_t vl) {
+  return vfcvt_f_xu_v_f16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfcvt_f_xu_v_f16m4 (vuint16m4_t src, size_t vl) {
+  return vfcvt_f_xu_v_f16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfcvt_f_xu_v_f16m8 (vuint16m8_t src, size_t vl) {
+  return vfcvt_f_xu_v_f16m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vfcvt_x_f_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
+  return vfcvt_x_f_v_i16mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
+  return vfcvt_rtz_x_f_v_i16mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vfcvt_x_f_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
+  return vfcvt_x_f_v_i16mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
+  return vfcvt_rtz_x_f_v_i16mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vfcvt_x_f_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
+  return vfcvt_x_f_v_i16m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
+  return vfcvt_rtz_x_f_v_i16m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vfcvt_x_f_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
+  return vfcvt_x_f_v_i16m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
+  return vfcvt_rtz_x_f_v_i16m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vfcvt_x_f_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
+  return vfcvt_x_f_v_i16m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
+  return vfcvt_rtz_x_f_v_i16m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vfcvt_x_f_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
+  return vfcvt_x_f_v_i16m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
+  return vfcvt_rtz_x_f_v_i16m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
+  return vfcvt_xu_f_v_u16mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
+  return vfcvt_rtz_xu_f_v_u16mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
+  return vfcvt_xu_f_v_u16mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
+  return vfcvt_rtz_xu_f_v_u16mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfcvt_xu_f_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
+  return vfcvt_xu_f_v_u16m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
+  return vfcvt_rtz_xu_f_v_u16m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfcvt_xu_f_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
+  return vfcvt_xu_f_v_u16m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
+  return vfcvt_rtz_xu_f_v_u16m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfcvt_xu_f_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
+  return vfcvt_xu_f_v_u16m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
+  return vfcvt_rtz_xu_f_v_u16m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfcvt_xu_f_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
+  return vfcvt_xu_f_v_u16m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
+  return vfcvt_rtz_xu_f_v_u16m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) {
+  return vfcvt_f_x_v_f16mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) {
+  return vfcvt_f_x_v_f16mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfcvt_f_x_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) {
+  return vfcvt_f_x_v_f16m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfcvt_f_x_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) {
+  return vfcvt_f_x_v_f16m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfcvt_f_x_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) {
+  return vfcvt_f_x_v_f16m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfcvt_f_x_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) {
+  return vfcvt_f_x_v_f16m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) {
+  return vfcvt_f_xu_v_f16mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) {
+  return vfcvt_f_xu_v_f16mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfcvt_f_xu_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) {
+  return vfcvt_f_xu_v_f16m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfcvt_f_xu_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) {
+  return vfcvt_f_xu_v_f16m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfcvt_f_xu_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) {
+  return vfcvt_f_xu_v_f16m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfcvt_f_xu_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) {
+  return vfcvt_f_xu_v_f16m8_m(mask, maskedoff, src, vl);
+}
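
For reference, a minimal standalone sketch of how the masked f16 conversion intrinsics exercised above can be used outside the test; the wrapper name is illustrative only and assumes the same RUN-line features (+experimental-v, +experimental-zfh):

#include <riscv_vector.h>

// Convert unsigned 16-bit integers to _Float16 under a mask; inactive
// elements are taken from maskedoff, matching the masked intrinsic form
// checked above. (Illustrative helper, not part of the patch.)
vfloat16m1_t u16_to_f16_masked(vbool16_t mask, vfloat16m1_t maskedoff,
                               vuint16m1_t src, size_t vl) {
  return vfcvt_f_xu_v_f16m1_m(mask, maskedoff, src, vl);
}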

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c
index ccad33064644..77aff2d232a2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -364,3 +365,219 @@ vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                    vfloat64m8_t op1, double op2, size_t vl) {
   return vfdiv_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfdiv_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfdiv_vv_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfdiv_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfdiv_vf_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfdiv_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfdiv_vv_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfdiv_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfdiv_vf_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfdiv_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfdiv_vv_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfdiv_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfdiv_vf_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfdiv_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfdiv_vv_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfdiv_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfdiv_vf_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfdiv_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfdiv_vv_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfdiv_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfdiv_vf_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfdiv_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vfdiv_vv_f16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfdiv_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfdiv_vf_f16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfdiv_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfdiv_vv_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfdiv_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfdiv_vf_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfdiv_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfdiv_vv_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfdiv_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfdiv_vf_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfdiv_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfdiv_vv_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfdiv_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfdiv_vf_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfdiv_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfdiv_vv_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfdiv_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfdiv_vf_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfdiv_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfdiv_vv_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfdiv_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfdiv_vf_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfdiv_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vfdiv_vv_f16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfdiv_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfdiv_vf_f16m8_m(mask, maskedoff, op1, op2, vl);
+}
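
Similarly, a minimal usage sketch for the new vector-scalar f16 divide covered by the tests above (illustrative helper name; same feature assumptions as the RUN line):

#include <riscv_vector.h>

// Divide every element of a half-precision vector by a scalar, using the
// vfdiv_vf form exercised above. (Illustrative only, not part of the patch.)
vfloat16m1_t scale_down_f16(vfloat16m1_t v, _Float16 divisor, size_t vl) {
  return vfdiv_vf_f16m1(v, divisor, vl);
}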

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c
index 072a32e28c35..e0c1935fb093 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -373,3 +374,219 @@ vfloat64m8_t test_vfmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
                                     vfloat64m8_t op2, size_t vl) {
   return vfmacc_vf_f64m8_m(mask, acc, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmacc_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfmacc_vv_f16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmacc_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfmacc_vf_f16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmacc_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfmacc_vv_f16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmacc_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfmacc_vf_f16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmacc_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfmacc_vv_f16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmacc_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
+  return vfmacc_vf_f16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmacc_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfmacc_vv_f16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmacc_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
+  return vfmacc_vf_f16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmacc_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfmacc_vv_f16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmacc_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
+  return vfmacc_vf_f16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmacc_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return vfmacc_vv_f16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmacc_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
+  return vfmacc_vf_f16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmacc_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfmacc_vv_f16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmacc_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfmacc_vf_f16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmacc_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfmacc_vv_f16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmacc_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfmacc_vf_f16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmacc_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfmacc_vv_f16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmacc_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
+  return vfmacc_vf_f16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmacc_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfmacc_vv_f16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmacc_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
+  return vfmacc_vf_f16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmacc_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfmacc_vv_f16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmacc_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
+  return vfmacc_vf_f16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmacc_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return vfmacc_vv_f16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmacc_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
+  return vfmacc_vf_f16m8_m(mask, vd, rs1, vs2, vl);
+}
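
A minimal sketch of the f16 multiply-accumulate form tested above; vfmacc overwrites its first operand with vs1 * vs2 + acc (the helper name is illustrative):

#include <riscv_vector.h>

// acc = vs1 * vs2 + acc, elementwise on _Float16 vectors.
// (Illustrative helper, not part of the patch.)
vfloat16m1_t fma_accumulate_f16(vfloat16m1_t acc, vfloat16m1_t vs1,
                                vfloat16m1_t vs2, size_t vl) {
  return vfmacc_vv_f16m1(acc, vs1, vs2, vl);
}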

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c
index 335343ee891c..d66779593c08 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -373,3 +374,219 @@ vfloat64m8_t test_vfmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
                                     vfloat64m8_t op2, size_t vl) {
   return vfmadd_vf_f64m8_m(mask, acc, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmadd_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfmadd_vv_f16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmadd_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfmadd_vf_f16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmadd_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfmadd_vv_f16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmadd_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfmadd_vf_f16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmadd_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfmadd_vv_f16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmadd_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
+  return vfmadd_vf_f16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmadd_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfmadd_vv_f16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmadd_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
+  return vfmadd_vf_f16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmadd_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfmadd_vv_f16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmadd_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
+  return vfmadd_vf_f16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmadd_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return vfmadd_vv_f16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmadd_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
+  return vfmadd_vf_f16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmadd_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfmadd_vv_f16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmadd_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfmadd_vf_f16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmadd_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfmadd_vv_f16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmadd_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfmadd_vf_f16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmadd_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfmadd_vv_f16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmadd_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
+  return vfmadd_vf_f16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmadd_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfmadd_vv_f16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmadd_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
+  return vfmadd_vf_f16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmadd_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfmadd_vv_f16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmadd_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
+  return vfmadd_vf_f16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmadd_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return vfmadd_vv_f16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmadd_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
+  return vfmadd_vf_f16m8_m(mask, vd, rs1, vs2, vl);
+}
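
For contrast with vfmacc above, vfmadd multiplies the destination operand: vd = vd * rs1 + vs2. A minimal sketch of the vector-scalar form tested above (illustrative helper):

#include <riscv_vector.h>

// vd = vd * rs1 + vs2, elementwise; the scalar multiplies the destination.
// (Illustrative helper, not part of the patch.)
vfloat16m1_t scale_then_add_f16(vfloat16m1_t vd, _Float16 rs1,
                                vfloat16m1_t vs2, size_t vl) {
  return vfmadd_vf_f16m1(vd, rs1, vs2, vl);
}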

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c
index ba17d0a995ea..608bf6c7927b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -364,3 +365,219 @@ vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                    vfloat64m8_t op1, double op2, size_t vl) {
   return vfmax_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmax_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfmax_vv_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmax_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfmax_vf_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmax_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfmax_vv_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmax_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfmax_vf_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmax_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfmax_vv_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmax_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfmax_vf_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmax_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfmax_vv_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmax_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfmax_vf_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmax_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfmax_vv_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmax_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfmax_vf_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmax_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vfmax_vv_f16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmax_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfmax_vf_f16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmax_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfmax_vv_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmax_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfmax_vf_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmax_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfmax_vv_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmax_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfmax_vf_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmax_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfmax_vv_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmax_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfmax_vf_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmax_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfmax_vv_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmax_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfmax_vf_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmax_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfmax_vv_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmax_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfmax_vf_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmax_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vfmax_vv_f16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmax_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfmax_vf_f16m8_m(mask, maskedoff, op1, op2, vl);
+}
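
A common use of the vector-scalar max form tested above is clamping to a lower bound; a minimal sketch (illustrative helper, same RUN-line feature assumptions):

#include <riscv_vector.h>

// Raise every element to at least lower_bound using vfmax_vf.
// (Illustrative helper, not part of the patch.)
vfloat16m1_t clamp_below_f16(vfloat16m1_t v, _Float16 lower_bound, size_t vl) {
  return vfmax_vf_f16m1(v, lower_bound, vl);
}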

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c
index abdb7173bc43..eb7afbb93df5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -94,3 +95,57 @@ vfloat64m8_t test_vfmerge_vfm_f64m8(vbool8_t mask, vfloat64m8_t op1, double op2,
                                     size_t vl) {
   return vfmerge_vfm_f64m8(mask, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmerge_vfm_f16mf4 (vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfmerge_vfm_f16mf4(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmerge_vfm_f16mf2 (vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfmerge_vfm_f16mf2(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmerge_vfm_f16m1 (vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfmerge_vfm_f16m1(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmerge_vfm_f16m2 (vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfmerge_vfm_f16m2(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmerge_vfm_f16m4 (vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfmerge_vfm_f16m4(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmerge_vfm_f16m8 (vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfmerge_vfm_f16m8(mask, op1, op2, vl);
+}

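For readers skimming the new f16 coverage, a minimal usage sketch of the vfmerge_vfm form tested above (this is illustration only, not part of the patch; the helper name is hypothetical). Per the vector spec, vfmerge_vfm_f16m1(mask, op1, op2, vl) selects the scalar op2 in lanes whose mask bit is set and keeps op1 elsewhere.

// Hypothetical helper, not part of this patch: replace the lanes selected by
// `mask` with the scalar `fill`, leaving the other lanes of `v` unchanged.
// Assumes the same feature set as the RUN lines above (+experimental-v and
// +experimental-zfh).
#include <riscv_vector.h>

vfloat16m1_t fill_masked_lanes(vbool16_t mask, vfloat16m1_t v,
                               _Float16 fill, size_t vl) {
  return vfmerge_vfm_f16m1(mask, v, fill, vl);
}
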
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c
index 3067ceee1001..b4c7ce2b910b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -364,3 +365,219 @@ vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                    vfloat64m8_t op1, double op2, size_t vl) {
   return vfmin_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmin_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfmin_vv_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmin_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfmin_vf_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmin_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfmin_vv_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmin_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfmin_vf_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmin_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfmin_vv_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmin_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfmin_vf_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmin_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfmin_vv_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vf_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmin_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfmin_vf_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmin_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfmin_vv_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vf_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmin_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfmin_vf_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmin_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vfmin_vv_f16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vf_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmin_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfmin_vf_f16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmin_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfmin_vv_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmin_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfmin_vf_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmin_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfmin_vv_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmin_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfmin_vf_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vv_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmin_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfmin_vv_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vf_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmin_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfmin_vf_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vv_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmin_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfmin_vv_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vf_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmin_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfmin_vf_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vv_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmin_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfmin_vv_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vf_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmin_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfmin_vf_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vv_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmin_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vfmin_vv_f16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmin_vf_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmin_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfmin_vf_f16m8_m(mask, maskedoff, op1, op2, vl);
+}

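As a rough illustration (again outside the patch), the unmasked vf form tested above can be strip-mined over a buffer; vsetvl_e16m1, vle16_v_f16m1 and vse16_v_f16m1 are assumed to be the usual companion intrinsics from the same header, and the helper name is hypothetical.

// Hypothetical sketch, not part of this patch: clamp a buffer of halves to an
// upper bound with vfmin_vf_f16m1, strip-mining the loop with vsetvl.
#include <riscv_vector.h>

void clamp_upper(_Float16 *dst, const _Float16 *src, _Float16 bound, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e16m1(n - i);              // lanes handled this pass
    vfloat16m1_t v = vle16_v_f16m1(src + i, vl);  // load (assumed companion)
    v = vfmin_vf_f16m1(v, bound, vl);             // element-wise min with scalar
    vse16_v_f16m1(dst + i, v, vl);                // store (assumed companion)
    i += vl;
  }
}
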
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c
index b35bbc857d2d..754c022acd11 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -373,3 +374,219 @@ vfloat64m8_t test_vfmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
                                     vfloat64m8_t op2, size_t vl) {
   return vfmsac_vf_f64m8_m(mask, acc, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmsac_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfmsac_vv_f16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmsac_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfmsac_vf_f16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmsac_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfmsac_vv_f16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmsac_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfmsac_vf_f16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmsac_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfmsac_vv_f16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmsac_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
+  return vfmsac_vf_f16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmsac_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfmsac_vv_f16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmsac_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
+  return vfmsac_vf_f16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmsac_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfmsac_vv_f16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmsac_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
+  return vfmsac_vf_f16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmsac_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return vfmsac_vv_f16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsac.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmsac_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
+  return vfmsac_vf_f16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmsac_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfmsac_vv_f16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmsac_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfmsac_vf_f16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmsac_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfmsac_vv_f16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmsac_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfmsac_vf_f16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmsac_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfmsac_vv_f16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmsac_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
+  return vfmsac_vf_f16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmsac_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfmsac_vv_f16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmsac_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
+  return vfmsac_vf_f16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmsac_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfmsac_vv_f16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmsac_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
+  return vfmsac_vf_f16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmsac_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return vfmsac_vv_f16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsac.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmsac_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
+  return vfmsac_vf_f16m8_m(mask, vd, rs1, vs2, vl);
+}

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c
index 0266d5168d82..b7ba904c38e0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -373,3 +374,219 @@ vfloat64m8_t test_vfmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
                                     vfloat64m8_t op2, size_t vl) {
   return vfmsub_vf_f64m8_m(mask, acc, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmsub_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfmsub_vv_f16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmsub_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfmsub_vf_f16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmsub_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfmsub_vv_f16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmsub_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfmsub_vf_f16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmsub_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfmsub_vv_f16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmsub_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
+  return vfmsub_vf_f16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmsub_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfmsub_vv_f16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmsub_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
+  return vfmsub_vf_f16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmsub_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfmsub_vv_f16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmsub_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
+  return vfmsub_vf_f16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmsub_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return vfmsub_vv_f16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsub.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmsub_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
+  return vfmsub_vf_f16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmsub_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfmsub_vv_f16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmsub_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfmsub_vf_f16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmsub_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfmsub_vv_f16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmsub_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfmsub_vf_f16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmsub_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfmsub_vv_f16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmsub_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
+  return vfmsub_vf_f16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmsub_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfmsub_vv_f16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmsub_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
+  return vfmsub_vf_f16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmsub_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfmsub_vv_f16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmsub_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
+  return vfmsub_vf_f16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmsub_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return vfmsub_vv_f16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsub.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmsub_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
+  return vfmsub_vf_f16m8_m(mask, vd, rs1, vs2, vl);
+}

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c
index e02df8cab9d9..00059ed25fa3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -364,3 +365,219 @@ vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                    vfloat64m8_t op1, double op2, size_t vl) {
   return vfmul_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmul_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfmul_vv_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmul_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfmul_vf_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmul_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfmul_vv_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmul_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfmul_vf_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmul_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfmul_vv_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmul_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfmul_vf_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmul_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfmul_vv_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vf_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmul_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfmul_vf_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmul_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfmul_vv_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vf_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmul_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfmul_vf_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmul_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vfmul_vv_f16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vf_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmul_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfmul_vf_f16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmul_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfmul_vv_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmul_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfmul_vf_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmul_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfmul_vv_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmul_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfmul_vf_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vv_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmul_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfmul_vv_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vf_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmul_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfmul_vf_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vv_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmul_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfmul_vv_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vf_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmul_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfmul_vf_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vv_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmul_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfmul_vv_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vf_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmul_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfmul_vf_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vv_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmul_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vfmul_vv_f16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vf_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmul_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfmul_vf_f16m8_m(mask, maskedoff, op1, op2, vl);
+}

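The _m variants added above all share the same shape: the mask selects which lanes are computed, and lanes whose mask bit is clear fall back to the maskedoff operand. A minimal sketch (not part of the patch; helper name hypothetical):

// Hypothetical sketch, not part of this patch: masked multiply where inactive
// lanes keep the original values carried in `orig`.
#include <riscv_vector.h>

vfloat16m1_t scale_selected(vbool16_t mask, vfloat16m1_t orig,
                            vfloat16m1_t v, _Float16 scale, size_t vl) {
  // Active lanes get v * scale; inactive lanes take orig's values.
  return vfmul_vf_f16m1_m(mask, orig, v, scale, vl);
}
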
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c
index ab1d8b6a3f06..227fdbb44128 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -247,3 +248,165 @@ double test_vfmv_f_s_f64m8_f64(vfloat64m8_t src) {
 vfloat64m8_t test_vfmv_s_f_f64m8(vfloat64m8_t dst, double src, size_t vl) {
   return vfmv_s_f_f64m8(dst, src, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmv_v_f_f16mf4 (_Float16 src, size_t vl) {
+  return vfmv_v_f_f16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmv_v_f_f16mf2 (_Float16 src, size_t vl) {
+  return vfmv_v_f_f16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmv_v_f_f16m1 (_Float16 src, size_t vl) {
+  return vfmv_v_f_f16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmv_v_f_f16m2 (_Float16 src, size_t vl) {
+  return vfmv_v_f_f16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmv_v_f_f16m4 (_Float16 src, size_t vl) {
+  return vfmv_v_f_f16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmv_v_f_f16m8 (_Float16 src, size_t vl) {
+  return vfmv_v_f_f16m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_f_s_f16mf4_f16(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv1f16(<vscale x 1 x half> [[SRC:%.*]])
+// CHECK-RV64-NEXT:    ret half [[TMP0]]
+//
+_Float16 test_vfmv_f_s_f16mf4_f16 (vfloat16mf4_t src) {
+  return vfmv_f_s_f16mf4_f16(src);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmv_s_f_f16mf4 (vfloat16mf4_t dest, _Float16 src, size_t vl) {
+  return vfmv_s_f_f16mf4(dest, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_f_s_f16mf2_f16(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv2f16(<vscale x 2 x half> [[SRC:%.*]])
+// CHECK-RV64-NEXT:    ret half [[TMP0]]
+//
+_Float16 test_vfmv_f_s_f16mf2_f16 (vfloat16mf2_t src) {
+  return vfmv_f_s_f16mf2_f16(src);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmv_s_f_f16mf2 (vfloat16mf2_t dest, _Float16 src, size_t vl) {
+  return vfmv_s_f_f16mf2(dest, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m1_f16(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv4f16(<vscale x 4 x half> [[SRC:%.*]])
+// CHECK-RV64-NEXT:    ret half [[TMP0]]
+//
+_Float16 test_vfmv_f_s_f16m1_f16 (vfloat16m1_t src) {
+  return vfmv_f_s_f16m1_f16(src);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmv_s_f_f16m1 (vfloat16m1_t dest, _Float16 src, size_t vl) {
+  return vfmv_s_f_f16m1(dest, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m2_f16(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv8f16(<vscale x 8 x half> [[SRC:%.*]])
+// CHECK-RV64-NEXT:    ret half [[TMP0]]
+//
+_Float16 test_vfmv_f_s_f16m2_f16 (vfloat16m2_t src) {
+  return vfmv_f_s_f16m2_f16(src);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmv_s_f_f16m2 (vfloat16m2_t dest, _Float16 src, size_t vl) {
+  return vfmv_s_f_f16m2(dest, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m4_f16(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv16f16(<vscale x 16 x half> [[SRC:%.*]])
+// CHECK-RV64-NEXT:    ret half [[TMP0]]
+//
+_Float16 test_vfmv_f_s_f16m4_f16 (vfloat16m4_t src) {
+  return vfmv_f_s_f16m4_f16(src);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmv_s_f_f16m4 (vfloat16m4_t dest, _Float16 src, size_t vl) {
+  return vfmv_s_f_f16m4(dest, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m8_f16(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv32f16(<vscale x 32 x half> [[SRC:%.*]])
+// CHECK-RV64-NEXT:    ret half [[TMP0]]
+//
+_Float16 test_vfmv_f_s_f16m8_f16 (vfloat16m8_t src) {
+  return vfmv_f_s_f16m8_f16(src);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmv_s_f_f16m8 (vfloat16m8_t dest, _Float16 src, size_t vl) {
+  return vfmv_s_f_f16m8(dest, src, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c
index bda61e684acf..6bc00343bb5a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -1008,3 +1009,795 @@ vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask,
                                            vfloat64m8_t src, size_t vl) {
   return vfncvt_rod_f_f_w_f32m4_m(mask, maskedoff, src, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_i8mf8 (vfloat16mf4_t src, size_t vl) {
+  return vfncvt_x_f_w_i8mf8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8 (vfloat16mf4_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i8mf8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_i8mf4 (vfloat16mf2_t src, size_t vl) {
+  return vfncvt_x_f_w_i8mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4 (vfloat16mf2_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i8mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_i8mf2 (vfloat16m1_t src, size_t vl) {
+  return vfncvt_x_f_w_i8mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2 (vfloat16m1_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i8mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_i8m1 (vfloat16m2_t src, size_t vl) {
+  return vfncvt_x_f_w_i8m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_i8m1 (vfloat16m2_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i8m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_i8m2 (vfloat16m4_t src, size_t vl) {
+  return vfncvt_x_f_w_i8m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_i8m2 (vfloat16m4_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i8m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_i8m4 (vfloat16m8_t src, size_t vl) {
+  return vfncvt_x_f_w_i8m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_i8m4 (vfloat16m8_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i8m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_u8mf8 (vfloat16mf4_t src, size_t vl) {
+  return vfncvt_xu_f_w_u8mf8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8 (vfloat16mf4_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u8mf8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_u8mf4 (vfloat16mf2_t src, size_t vl) {
+  return vfncvt_xu_f_w_u8mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4 (vfloat16mf2_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u8mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_u8mf2 (vfloat16m1_t src, size_t vl) {
+  return vfncvt_xu_f_w_u8mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2 (vfloat16m1_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u8mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_u8m1 (vfloat16m2_t src, size_t vl) {
+  return vfncvt_xu_f_w_u8m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1 (vfloat16m2_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u8m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_u8m2 (vfloat16m4_t src, size_t vl) {
+  return vfncvt_xu_f_w_u8m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2 (vfloat16m4_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u8m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_u8m4 (vfloat16m8_t src, size_t vl) {
+  return vfncvt_xu_f_w_u8m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4 (vfloat16m8_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u8m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfncvt_f_x_w_f16mf4 (vint32mf2_t src, size_t vl) {
+  return vfncvt_f_x_w_f16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfncvt_f_x_w_f16mf2 (vint32m1_t src, size_t vl) {
+  return vfncvt_f_x_w_f16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfncvt_f_x_w_f16m1 (vint32m2_t src, size_t vl) {
+  return vfncvt_f_x_w_f16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfncvt_f_x_w_f16m2 (vint32m4_t src, size_t vl) {
+  return vfncvt_f_x_w_f16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfncvt_f_x_w_f16m4 (vint32m8_t src, size_t vl) {
+  return vfncvt_f_x_w_f16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4 (vuint32mf2_t src, size_t vl) {
+  return vfncvt_f_xu_w_f16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2 (vuint32m1_t src, size_t vl) {
+  return vfncvt_f_xu_w_f16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfncvt_f_xu_w_f16m1 (vuint32m2_t src, size_t vl) {
+  return vfncvt_f_xu_w_f16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfncvt_f_xu_w_f16m2 (vuint32m4_t src, size_t vl) {
+  return vfncvt_f_xu_w_f16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfncvt_f_xu_w_f16m4 (vuint32m8_t src, size_t vl) {
+  return vfncvt_f_xu_w_f16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfncvt_f_f_w_f16mf4 (vfloat32mf2_t src, size_t vl) {
+  return vfncvt_f_f_w_f16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4 (vfloat32mf2_t src, size_t vl) {
+  return vfncvt_rod_f_f_w_f16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfncvt_f_f_w_f16mf2 (vfloat32m1_t src, size_t vl) {
+  return vfncvt_f_f_w_f16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2 (vfloat32m1_t src, size_t vl) {
+  return vfncvt_rod_f_f_w_f16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfncvt_f_f_w_f16m1 (vfloat32m2_t src, size_t vl) {
+  return vfncvt_f_f_w_f16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1 (vfloat32m2_t src, size_t vl) {
+  return vfncvt_rod_f_f_w_f16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfncvt_f_f_w_f16m2 (vfloat32m4_t src, size_t vl) {
+  return vfncvt_f_f_w_f16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2 (vfloat32m4_t src, size_t vl) {
+  return vfncvt_rod_f_f_w_f16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfncvt_f_f_w_f16m4 (vfloat32m8_t src, size_t vl) {
+  return vfncvt_f_f_w_f16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4 (vfloat32m8_t src, size_t vl) {
+  return vfncvt_rod_f_f_w_f16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
+  return vfncvt_x_f_w_i8mf8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i8mf8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
+  return vfncvt_x_f_w_i8mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i8mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
+  return vfncvt_x_f_w_i8mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i8mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
+  return vfncvt_x_f_w_i8m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i8m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
+  return vfncvt_x_f_w_i8m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i8m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
+  return vfncvt_x_f_w_i8m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i8m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
+  return vfncvt_xu_f_w_u8mf8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u8mf8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
+  return vfncvt_xu_f_w_u8mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u8mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
+  return vfncvt_xu_f_w_u8mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u8mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
+  return vfncvt_xu_f_w_u8m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u8m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
+  return vfncvt_xu_f_w_u8m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u8m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
+  return vfncvt_xu_f_w_u8m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u8m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) {
+  return vfncvt_f_x_w_f16mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) {
+  return vfncvt_f_x_w_f16mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfncvt_f_x_w_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) {
+  return vfncvt_f_x_w_f16m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfncvt_f_x_w_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) {
+  return vfncvt_f_x_w_f16m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfncvt_f_x_w_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) {
+  return vfncvt_f_x_w_f16m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) {
+  return vfncvt_f_xu_w_f16mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) {
+  return vfncvt_f_xu_w_f16mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfncvt_f_xu_w_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) {
+  return vfncvt_f_xu_w_f16m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfncvt_f_xu_w_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) {
+  return vfncvt_f_xu_w_f16m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfncvt_f_xu_w_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) {
+  return vfncvt_f_xu_w_f16m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
+  return vfncvt_f_f_w_f16mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
+  return vfncvt_rod_f_f_w_f16mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
+  return vfncvt_f_f_w_f16mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
+  return vfncvt_rod_f_f_w_f16mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfncvt_f_f_w_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
+  return vfncvt_f_f_w_f16m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
+  return vfncvt_rod_f_f_w_f16m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfncvt_f_f_w_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
+  return vfncvt_f_f_w_f16m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
+  return vfncvt_rod_f_f_w_f16m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfncvt_f_f_w_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
+  return vfncvt_f_f_w_f16m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
+  return vfncvt_rod_f_f_w_f16m4_m(mask, maskedoff, src, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c
index 98f37bad66aa..d705f7a03e41 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -167,3 +168,110 @@ vfloat64m8_t test_vfneg_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat
   return vfneg_v_f64m8_m(mask, maskedoff, op1, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vfneg_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfneg_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
+  return vfneg_v_f16mf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfneg_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
+  return vfneg_v_f16mf2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfneg_v_f16m1 (vfloat16m1_t op1, size_t vl) {
+  return vfneg_v_f16m1(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfneg_v_f16m2 (vfloat16m2_t op1, size_t vl) {
+  return vfneg_v_f16m2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfneg_v_f16m4 (vfloat16m4_t op1, size_t vl) {
+  return vfneg_v_f16m4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfneg_v_f16m8 (vfloat16m8_t op1, size_t vl) {
+  return vfneg_v_f16m8(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfneg_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
+  return vfneg_v_f16mf4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfneg_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
+  return vfneg_v_f16mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfneg_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
+  return vfneg_v_f16m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfneg_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
+  return vfneg_v_f16m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfneg_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
+  return vfneg_v_f16m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfneg_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
+  return vfneg_v_f16m8_m(mask, maskedoff, op1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c
index af9cdbc07328..b48bd2ee21e7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -374,3 +375,219 @@ vfloat64m8_t test_vfnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
                                      double op1, vfloat64m8_t op2, size_t vl) {
   return vfnmacc_vf_f64m8_m(mask, acc, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfnmacc_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfnmacc_vv_f16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfnmacc_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfnmacc_vf_f16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfnmacc_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfnmacc_vv_f16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfnmacc_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfnmacc_vf_f16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfnmacc_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfnmacc_vv_f16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfnmacc_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
+  return vfnmacc_vf_f16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfnmacc_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfnmacc_vv_f16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfnmacc_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
+  return vfnmacc_vf_f16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfnmacc_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfnmacc_vv_f16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfnmacc_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
+  return vfnmacc_vf_f16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfnmacc_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return vfnmacc_vv_f16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmacc.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfnmacc_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
+  return vfnmacc_vf_f16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfnmacc_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfnmacc_vv_f16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfnmacc_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfnmacc_vf_f16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfnmacc_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfnmacc_vv_f16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfnmacc_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfnmacc_vf_f16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfnmacc_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfnmacc_vv_f16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfnmacc_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
+  return vfnmacc_vf_f16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfnmacc_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfnmacc_vv_f16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfnmacc_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
+  return vfnmacc_vf_f16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfnmacc_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfnmacc_vv_f16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfnmacc_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
+  return vfnmacc_vf_f16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfnmacc_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return vfnmacc_vv_f16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmacc.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfnmacc_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
+  return vfnmacc_vf_f16m8_m(mask, vd, rs1, vs2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c
index b9e76dcafe75..6192539a08e4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -374,3 +375,219 @@ vfloat64m8_t test_vfnmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
                                      double op1, vfloat64m8_t op2, size_t vl) {
   return vfnmadd_vf_f64m8_m(mask, acc, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfnmadd_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfnmadd_vv_f16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfnmadd_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfnmadd_vf_f16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfnmadd_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfnmadd_vv_f16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfnmadd_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfnmadd_vf_f16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfnmadd_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfnmadd_vv_f16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfnmadd_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
+  return vfnmadd_vf_f16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfnmadd_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfnmadd_vv_f16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfnmadd_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
+  return vfnmadd_vf_f16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfnmadd_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfnmadd_vv_f16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfnmadd_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
+  return vfnmadd_vf_f16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfnmadd_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return vfnmadd_vv_f16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmadd.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfnmadd_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
+  return vfnmadd_vf_f16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfnmadd_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfnmadd_vv_f16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfnmadd_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfnmadd_vf_f16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfnmadd_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfnmadd_vv_f16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfnmadd_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfnmadd_vf_f16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfnmadd_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfnmadd_vv_f16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfnmadd_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
+  return vfnmadd_vf_f16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfnmadd_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfnmadd_vv_f16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfnmadd_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
+  return vfnmadd_vf_f16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfnmadd_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfnmadd_vv_f16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfnmadd_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
+  return vfnmadd_vf_f16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfnmadd_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return vfnmadd_vv_f16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfnmadd_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
+  return vfnmadd_vf_f16m8_m(mask, vd, rs1, vs2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c
index 162726dd5e13..b20a71d92e13 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -374,3 +375,219 @@ vfloat64m8_t test_vfnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
                                      double op1, vfloat64m8_t op2, size_t vl) {
   return vfnmsac_vf_f64m8_m(mask, acc, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfnmsac_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfnmsac_vv_f16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfnmsac_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfnmsac_vf_f16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfnmsac_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfnmsac_vv_f16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfnmsac_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfnmsac_vf_f16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfnmsac_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfnmsac_vv_f16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfnmsac_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
+  return vfnmsac_vf_f16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfnmsac_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfnmsac_vv_f16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfnmsac_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
+  return vfnmsac_vf_f16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfnmsac_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfnmsac_vv_f16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfnmsac_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
+  return vfnmsac_vf_f16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsac.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfnmsac_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return vfnmsac_vv_f16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsac.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfnmsac_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
+  return vfnmsac_vf_f16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfnmsac_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfnmsac_vv_f16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfnmsac_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfnmsac_vf_f16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfnmsac_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfnmsac_vv_f16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfnmsac_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfnmsac_vf_f16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfnmsac_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfnmsac_vv_f16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfnmsac_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
+  return vfnmsac_vf_f16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfnmsac_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfnmsac_vv_f16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfnmsac_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
+  return vfnmsac_vf_f16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfnmsac_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfnmsac_vv_f16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfnmsac_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
+  return vfnmsac_vf_f16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsac.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfnmsac_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return vfnmsac_vv_f16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsac.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfnmsac_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
+  return vfnmsac_vf_f16m8_m(mask, vd, rs1, vs2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c
index 8a0269efe414..de51dc6e2ac1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -374,3 +375,225 @@ vfloat64m8_t test_vfnmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
                                      double op1, vfloat64m8_t op2, size_t vl) {
   return vfnmsub_vf_f64m8_m(mask, acc, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfnmsub_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfnmsub_vv_f16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfnmsub_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfnmsub_vf_f16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfnmsub_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfnmsub_vv_f16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfnmsub_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfnmsub_vf_f16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfnmsub_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfnmsub_vv_f16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfnmsub_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
+  return vfnmsub_vf_f16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfnmsub_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfnmsub_vv_f16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfnmsub_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
+  return vfnmsub_vf_f16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfnmsub_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfnmsub_vv_f16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfnmsub_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
+  return vfnmsub_vf_f16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsub.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfnmsub_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return vfnmsub_vv_f16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsub.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfnmsub_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
+  return vfnmsub_vf_f16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfnmsub_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfnmsub_vv_f16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfnmsub_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfnmsub_vf_f16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfnmsub_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfnmsub_vv_f16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfnmsub_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfnmsub_vf_f16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfnmsub_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfnmsub_vv_f16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfnmsub_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
+  return vfnmsub_vf_f16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfnmsub_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfnmsub_vv_f16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfnmsub_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
+  return vfnmsub_vf_f16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfnmsub_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfnmsub_vv_f16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfnmsub_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
+  return vfnmsub_vf_f16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsub.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfnmsub_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return vfnmsub_vv_f16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsub.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfnmsub_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
+  return vfnmsub_vf_f16m8_m(mask, vd, rs1, vs2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
index e3a67d320c94..5ba1f1a19b3b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -175,3 +176,111 @@ vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                     vfloat64m8_t op1, double op2, size_t vl) {
   return vfrdiv_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfrdiv_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfrdiv_vf_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrdiv.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfrdiv_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfrdiv_vf_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrdiv.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfrdiv_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfrdiv_vf_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrdiv.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfrdiv_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfrdiv_vf_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrdiv.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfrdiv_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfrdiv_vf_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrdiv.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfrdiv_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfrdiv_vf_f16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfrdiv_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfrdiv_vf_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrdiv.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfrdiv_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfrdiv_vf_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrdiv.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfrdiv_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfrdiv_vf_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrdiv.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfrdiv_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfrdiv_vf_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrdiv.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfrdiv_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfrdiv_vf_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrdiv.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfrdiv_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfrdiv_vf_f16m8_m(mask, maskedoff, op1, op2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c
index 09db4584aef8..c73f94553fe5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -175,3 +176,111 @@ vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                    vfloat64m8_t op1, size_t vl) {
   return vfrec7_v_f64m8_m(mask, maskedoff, op1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfrec7_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
+  return vfrec7_v_f16mf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfrec7_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
+  return vfrec7_v_f16mf2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrec7_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfrec7_v_f16m1 (vfloat16m1_t op1, size_t vl) {
+  return vfrec7_v_f16m1(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrec7_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfrec7_v_f16m2 (vfloat16m2_t op1, size_t vl) {
+  return vfrec7_v_f16m2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrec7_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfrec7_v_f16m4 (vfloat16m4_t op1, size_t vl) {
+  return vfrec7_v_f16m4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrec7_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfrec7_v_f16m8 (vfloat16m8_t op1, size_t vl) {
+  return vfrec7_v_f16m8(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfrec7_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
+  return vfrec7_v_f16mf4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfrec7_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
+  return vfrec7_v_f16mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrec7_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfrec7_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
+  return vfrec7_v_f16m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrec7_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfrec7_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
+  return vfrec7_v_f16m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrec7_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfrec7_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
+  return vfrec7_v_f16m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrec7_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfrec7_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
+  return vfrec7_v_f16m8_m(mask, maskedoff, op1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c
index 72f68e56123a..62911f9cd606 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -194,3 +195,111 @@ vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
                                             vfloat64m1_t scalar, size_t vl) {
   return vfredmax_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1 (vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16mf4_f16m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1 (vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16mf2_f16m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16m1_f16m1 (vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16m1_f16m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16m2_f16m1 (vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16m2_f16m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16m4_f16m1 (vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16m4_f16m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16m8_f16m1 (vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16m8_f16m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_m (vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16mf4_f16m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_m (vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16mf2_f16m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m (vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m (vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m (vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c
index 2f7808be8153..1959fe52afa2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -194,3 +195,111 @@ vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
                                             vfloat64m1_t scalar, size_t vl) {
   return vfredmin_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1 (vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin_vs_f16mf4_f16m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1 (vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin_vs_f16mf2_f16m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmin_vs_f16m1_f16m1 (vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin_vs_f16m1_f16m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmin_vs_f16m2_f16m1 (vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin_vs_f16m2_f16m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmin_vs_f16m4_f16m1 (vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin_vs_f16m4_f16m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmin_vs_f16m8_f16m1 (vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin_vs_f16m8_f16m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_m (vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin_vs_f16mf4_f16m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_m (vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin_vs_f16mf2_f16m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m (vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m (vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m (vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
index a72ad9027199..6c1ad4e76593 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -392,3 +393,219 @@ vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
                                              vfloat64m1_t scalar, size_t vl) {
   return vfredosum_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f16mf4_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredsum_vs_f16mf4_f16m1 (vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredsum_vs_f16mf4_f16m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f16mf2_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredsum_vs_f16mf2_f16m1 (vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredsum_vs_f16mf2_f16m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f16m1_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredsum_vs_f16m1_f16m1 (vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredsum_vs_f16m1_f16m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f16m2_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredsum_vs_f16m2_f16m1 (vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredsum_vs_f16m2_f16m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f16m4_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredsum_vs_f16m4_f16m1 (vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredsum_vs_f16m4_f16m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f16m8_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredsum_vs_f16m8_f16m1 (vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredsum_vs_f16m8_f16m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f16mf4_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredsum_vs_f16mf4_f16m1_m (vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredsum_vs_f16mf4_f16m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f16mf2_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredsum_vs_f16mf2_f16m1_m (vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredsum_vs_f16mf2_f16m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f16m1_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredsum_vs_f16m1_f16m1_m (vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredsum_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f16m2_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredsum_vs_f16m2_f16m1_m (vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredsum_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f16m4_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredsum_vs_f16m4_f16m1_m (vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredsum_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f16m8_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredsum_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredsum_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1 (vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum_vs_f16mf4_f16m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1 (vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum_vs_f16mf2_f16m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredosum_vs_f16m1_f16m1 (vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum_vs_f16m1_f16m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredosum_vs_f16m2_f16m1 (vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum_vs_f16m2_f16m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredosum_vs_f16m4_f16m1 (vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum_vs_f16m4_f16m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredosum_vs_f16m8_f16m1 (vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum_vs_f16m8_f16m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m (vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum_vs_f16mf4_f16m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m (vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum_vs_f16mf2_f16m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m (vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m (vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m (vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c
index ef78897d2c97..dc414060709a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -175,3 +176,111 @@ vfloat64m8_t test_vfrsqrt7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                      vfloat64m8_t op1, size_t vl) {
   return vfrsqrt7_v_f64m8_m(mask, maskedoff, op1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfrsqrt7_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
+  return vfrsqrt7_v_f16mf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrsqrt7.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfrsqrt7_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
+  return vfrsqrt7_v_f16mf2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrsqrt7.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfrsqrt7_v_f16m1 (vfloat16m1_t op1, size_t vl) {
+  return vfrsqrt7_v_f16m1(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrsqrt7.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfrsqrt7_v_f16m2 (vfloat16m2_t op1, size_t vl) {
+  return vfrsqrt7_v_f16m2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrsqrt7.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfrsqrt7_v_f16m4 (vfloat16m4_t op1, size_t vl) {
+  return vfrsqrt7_v_f16m4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrsqrt7.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfrsqrt7_v_f16m8 (vfloat16m8_t op1, size_t vl) {
+  return vfrsqrt7_v_f16m8(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfrsqrt7_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
+  return vfrsqrt7_v_f16mf4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrsqrt7.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfrsqrt7_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
+  return vfrsqrt7_v_f16mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrsqrt7.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfrsqrt7_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
+  return vfrsqrt7_v_f16m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrsqrt7.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfrsqrt7_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
+  return vfrsqrt7_v_f16m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrsqrt7.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfrsqrt7_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
+  return vfrsqrt7_v_f16m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrsqrt7.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfrsqrt7_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
+  return vfrsqrt7_v_f16m8_m(mask, maskedoff, op1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c
index 832198e27001..4f5561380f19 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -175,3 +176,111 @@ vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                     vfloat64m8_t op1, double op2, size_t vl) {
   return vfrsub_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfrsub_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfrsub_vf_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfrsub_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfrsub_vf_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfrsub_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfrsub_vf_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfrsub_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfrsub_vf_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfrsub_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfrsub_vf_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfrsub_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfrsub_vf_f16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrsub.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfrsub_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfrsub_vf_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrsub.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfrsub_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfrsub_vf_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrsub.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfrsub_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfrsub_vf_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrsub.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfrsub_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfrsub_vf_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrsub.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfrsub_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfrsub_vf_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrsub.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfrsub_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfrsub_vf_f16m8_m(mask, maskedoff, op1, op2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c
index eaa7cbbd950b..13ab571bd197 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -1086,3 +1087,651 @@ vfloat64m8_t test_vfsgnjx_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                      vfloat64m8_t op1, double op2, size_t vl) {
   return vfsgnjx_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsgnj_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfsgnj_vv_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsgnj_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfsgnj_vf_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsgnj_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfsgnj_vv_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsgnj_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfsgnj_vf_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsgnj_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfsgnj_vv_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsgnj_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfsgnj_vf_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsgnj_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfsgnj_vv_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsgnj_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfsgnj_vf_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsgnj_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfsgnj_vv_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsgnj_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfsgnj_vf_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsgnj_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vfsgnj_vv_f16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsgnj_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfsgnj_vf_f16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsgnjn_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfsgnjn_vv_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsgnjn_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfsgnjn_vf_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsgnjn_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfsgnjn_vv_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsgnjn_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfsgnjn_vf_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsgnjn_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfsgnjn_vv_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsgnjn_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfsgnjn_vf_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsgnjn_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfsgnjn_vv_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsgnjn_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfsgnjn_vf_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsgnjn_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfsgnjn_vv_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsgnjn_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfsgnjn_vf_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsgnjn_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vfsgnjn_vv_f16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsgnjn_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfsgnjn_vf_f16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsgnjx_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfsgnjx_vv_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsgnjx_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfsgnjx_vf_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsgnjx_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfsgnjx_vv_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsgnjx_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfsgnjx_vf_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsgnjx_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfsgnjx_vv_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsgnjx_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfsgnjx_vf_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsgnjx_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfsgnjx_vv_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsgnjx_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfsgnjx_vf_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsgnjx_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfsgnjx_vv_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsgnjx_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfsgnjx_vf_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsgnjx_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vfsgnjx_vv_f16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsgnjx_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfsgnjx_vf_f16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsgnj_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfsgnj_vv_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsgnj_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfsgnj_vf_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsgnj_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfsgnj_vv_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsgnj_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfsgnj_vf_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsgnj_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfsgnj_vv_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsgnj_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfsgnj_vf_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsgnj_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfsgnj_vv_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsgnj_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfsgnj_vf_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsgnj_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfsgnj_vv_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsgnj_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfsgnj_vf_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsgnj_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vfsgnj_vv_f16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsgnj_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfsgnj_vf_f16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsgnjn_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfsgnjn_vv_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsgnjn_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfsgnjn_vf_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsgnjn_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfsgnjn_vv_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsgnjn_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfsgnjn_vf_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsgnjn_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfsgnjn_vv_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsgnjn_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfsgnjn_vf_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsgnjn_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfsgnjn_vv_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsgnjn_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfsgnjn_vf_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsgnjn_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfsgnjn_vv_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsgnjn_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfsgnjn_vf_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsgnjn_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vfsgnjn_vv_f16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsgnjn_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfsgnjn_vf_f16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsgnjx_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfsgnjx_vv_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsgnjx_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfsgnjx_vf_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsgnjx_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfsgnjx_vv_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsgnjx_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfsgnjx_vf_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsgnjx_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfsgnjx_vv_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsgnjx_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfsgnjx_vf_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsgnjx_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfsgnjx_vv_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsgnjx_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfsgnjx_vf_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsgnjx_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfsgnjx_vv_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsgnjx_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfsgnjx_vf_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsgnjx_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vfsgnjx_vv_f16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsgnjx_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfsgnjx_vf_f16m8_m(mask, maskedoff, op1, op2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
index 83e2b1812a2e..ca951c0f1a76 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -199,3 +200,111 @@ vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                           size_t vl) {
   return vfslide1down_vf_f64m8_m(mask, maskedoff, src, value, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16.i64(<vscale x 1 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfslide1down_vf_f16mf4 (vfloat16mf4_t src, _Float16 value, size_t vl) {
+  return vfslide1down_vf_f16mf4(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16.i64(<vscale x 2 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfslide1down_vf_f16mf2 (vfloat16mf2_t src, _Float16 value, size_t vl) {
+  return vfslide1down_vf_f16mf2(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16.i64(<vscale x 4 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfslide1down_vf_f16m1 (vfloat16m1_t src, _Float16 value, size_t vl) {
+  return vfslide1down_vf_f16m1(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16.i64(<vscale x 8 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfslide1down_vf_f16m2 (vfloat16m2_t src, _Float16 value, size_t vl) {
+  return vfslide1down_vf_f16m2(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16.i64(<vscale x 16 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfslide1down_vf_f16m4 (vfloat16m4_t src, _Float16 value, size_t vl) {
+  return vfslide1down_vf_f16m4(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16.i64(<vscale x 32 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfslide1down_vf_f16m8 (vfloat16m8_t src, _Float16 value, size_t vl) {
+  return vfslide1down_vf_f16m8(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], half [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfslide1down_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) {
+  return vfslide1down_vf_f16mf4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfslide1down.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], half [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfslide1down_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) {
+  return vfslide1down_vf_f16mf2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfslide1down.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], half [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfslide1down_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) {
+  return vfslide1down_vf_f16m1_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfslide1down.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], half [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfslide1down_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) {
+  return vfslide1down_vf_f16m2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfslide1down.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], half [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfslide1down_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) {
+  return vfslide1down_vf_f16m4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfslide1down.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[SRC:%.*]], half [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfslide1down_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) {
+  return vfslide1down_vf_f16m8_m(mask, maskedoff, src, value, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c
index 771c08646a03..1fee5aaac472 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -194,3 +195,111 @@ vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                         size_t vl) {
   return vfslide1up_vf_f64m8_m(mask, maskedoff, src, value, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16.i64(<vscale x 1 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfslide1up_vf_f16mf4 (vfloat16mf4_t src, _Float16 value, size_t vl) {
+  return vfslide1up_vf_f16mf4(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16.i64(<vscale x 2 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfslide1up_vf_f16mf2 (vfloat16mf2_t src, _Float16 value, size_t vl) {
+  return vfslide1up_vf_f16mf2(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16.i64(<vscale x 4 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfslide1up_vf_f16m1 (vfloat16m1_t src, _Float16 value, size_t vl) {
+  return vfslide1up_vf_f16m1(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfslide1up.nxv8f16.f16.i64(<vscale x 8 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfslide1up_vf_f16m2 (vfloat16m2_t src, _Float16 value, size_t vl) {
+  return vfslide1up_vf_f16m2(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfslide1up.nxv16f16.f16.i64(<vscale x 16 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfslide1up_vf_f16m4 (vfloat16m4_t src, _Float16 value, size_t vl) {
+  return vfslide1up_vf_f16m4(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfslide1up.nxv32f16.f16.i64(<vscale x 32 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfslide1up_vf_f16m8 (vfloat16m8_t src, _Float16 value, size_t vl) {
+  return vfslide1up_vf_f16m8(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], half [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfslide1up_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) {
+  return vfslide1up_vf_f16mf4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfslide1up.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], half [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfslide1up_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) {
+  return vfslide1up_vf_f16mf2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfslide1up.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], half [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfslide1up_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) {
+  return vfslide1up_vf_f16m1_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfslide1up.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], half [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfslide1up_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) {
+  return vfslide1up_vf_f16m2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfslide1up.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], half [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfslide1up_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) {
+  return vfslide1up_vf_f16m4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfslide1up.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[SRC:%.*]], half [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfslide1up_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) {
+  return vfslide1up_vf_f16m8_m(mask, maskedoff, src, value, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c
index 7d2a0b336627..345378c8d8e0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -175,3 +176,111 @@ vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                    vfloat64m8_t op1, size_t vl) {
   return vfsqrt_v_f64m8_m(mask, maskedoff, op1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsqrt_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
+  return vfsqrt_v_f16mf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsqrt_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
+  return vfsqrt_v_f16mf2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsqrt_v_f16m1 (vfloat16m1_t op1, size_t vl) {
+  return vfsqrt_v_f16m1(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsqrt_v_f16m2 (vfloat16m2_t op1, size_t vl) {
+  return vfsqrt_v_f16m2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsqrt_v_f16m4 (vfloat16m4_t op1, size_t vl) {
+  return vfsqrt_v_f16m4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsqrt_v_f16m8 (vfloat16m8_t op1, size_t vl) {
+  return vfsqrt_v_f16m8(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsqrt_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
+  return vfsqrt_v_f16mf4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsqrt_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
+  return vfsqrt_v_f16mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsqrt_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
+  return vfsqrt_v_f16m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsqrt_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
+  return vfsqrt_v_f16m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsqrt_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
+  return vfsqrt_v_f16m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsqrt_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
+  return vfsqrt_v_f16m8_m(mask, maskedoff, op1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
index dee966722bc3..7e3831f1c8b4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -364,3 +365,219 @@ vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                    vfloat64m8_t op1, double op2, size_t vl) {
   return vfsub_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsub_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfsub_vv_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsub_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfsub_vf_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsub_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfsub_vv_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsub_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfsub_vf_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsub_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfsub_vv_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsub_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfsub_vf_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsub_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfsub_vv_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsub_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfsub_vf_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsub_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfsub_vv_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsub_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfsub_vf_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsub_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vfsub_vv_f16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsub_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfsub_vf_f16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsub_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfsub_vv_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsub_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfsub_vf_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsub_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfsub_vv_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsub_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfsub_vf_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsub_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfsub_vv_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsub_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfsub_vf_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsub_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfsub_vv_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsub_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfsub_vf_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsub_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfsub_vv_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsub_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfsub_vf_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsub_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vfsub_vv_f16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsub_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfsub_vf_f16m8_m(mask, maskedoff, op1, op2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c
index a5f47d63765c..b2633d50626d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -324,3 +325,363 @@ vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                     vfloat64m8_t op1, float op2, size_t vl) {
   return vfwadd_wf_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfwadd_vv_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfwadd_vf_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_f32mf2 (vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfwadd_wv_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.f16.i64(<vscale x 1 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_f32mf2 (vfloat32mf2_t op1, _Float16 op2, size_t vl) {
+  return vfwadd_wf_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfwadd_vv_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfwadd_vf_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_f32m1 (vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfwadd_wv_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.f16.i64(<vscale x 2 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_f32m1 (vfloat32m1_t op1, _Float16 op2, size_t vl) {
+  return vfwadd_wf_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfwadd_vv_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfwadd_vf_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_f32m2 (vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfwadd_wv_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.f16.i64(<vscale x 4 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_f32m2 (vfloat32m2_t op1, _Float16 op2, size_t vl) {
+  return vfwadd_wf_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfwadd_vv_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfwadd_vf_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_f32m4 (vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfwadd_wv_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.f16.i64(<vscale x 8 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_f32m4 (vfloat32m4_t op1, _Float16 op2, size_t vl) {
+  return vfwadd_wf_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfwadd_vv_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfwadd_vf_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_f32m8 (vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfwadd_wv_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.f16.i64(<vscale x 16 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_f32m8 (vfloat32m8_t op1, _Float16 op2, size_t vl) {
+  return vfwadd_wf_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfwadd_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfwadd_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfwadd_wv_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) {
+  return vfwadd_wf_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfwadd_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfwadd_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfwadd_wv_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) {
+  return vfwadd_wf_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfwadd_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfwadd_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfwadd_wv_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) {
+  return vfwadd_wf_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfwadd_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfwadd_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfwadd_wv_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) {
+  return vfwadd_wf_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfwadd_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfwadd_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfwadd_wv_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) {
+  return vfwadd_wf_f32m8_m(mask, maskedoff, op1, op2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c
index 06a1703da9c1..76f1401152ae 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -731,3 +732,669 @@ vfloat64m8_t test_vfwcvt_f_f_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                        vfloat32m4_t src, size_t vl) {
   return vfwcvt_f_f_v_f64m8_m(mask, maskedoff, src, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4 (vint8mf8_t src, size_t vl) {
+  return vfwcvt_f_x_v_f16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2 (vint8mf4_t src, size_t vl) {
+  return vfwcvt_f_x_v_f16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfwcvt_f_x_v_f16m1 (vint8mf2_t src, size_t vl) {
+  return vfwcvt_f_x_v_f16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfwcvt_f_x_v_f16m2 (vint8m1_t src, size_t vl) {
+  return vfwcvt_f_x_v_f16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfwcvt_f_x_v_f16m4 (vint8m2_t src, size_t vl) {
+  return vfwcvt_f_x_v_f16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfwcvt_f_x_v_f16m8 (vint8m4_t src, size_t vl) {
+  return vfwcvt_f_x_v_f16m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4 (vuint8mf8_t src, size_t vl) {
+  return vfwcvt_f_xu_v_f16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2 (vuint8mf4_t src, size_t vl) {
+  return vfwcvt_f_xu_v_f16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfwcvt_f_xu_v_f16m1 (vuint8mf2_t src, size_t vl) {
+  return vfwcvt_f_xu_v_f16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfwcvt_f_xu_v_f16m2 (vuint8m1_t src, size_t vl) {
+  return vfwcvt_f_xu_v_f16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfwcvt_f_xu_v_f16m4 (vuint8m2_t src, size_t vl) {
+  return vfwcvt_f_xu_v_f16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfwcvt_f_xu_v_f16m8 (vuint8m4_t src, size_t vl) {
+  return vfwcvt_f_xu_v_f16m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vfwcvt_x_f_v_i32mf2 (vfloat16mf4_t src, size_t vl) {
+  return vfwcvt_x_f_v_i32mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2 (vfloat16mf4_t src, size_t vl) {
+  return vfwcvt_rtz_x_f_v_i32mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vfwcvt_x_f_v_i32m1 (vfloat16mf2_t src, size_t vl) {
+  return vfwcvt_x_f_v_i32m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1 (vfloat16mf2_t src, size_t vl) {
+  return vfwcvt_rtz_x_f_v_i32m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vfwcvt_x_f_v_i32m2 (vfloat16m1_t src, size_t vl) {
+  return vfwcvt_x_f_v_i32m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2 (vfloat16m1_t src, size_t vl) {
+  return vfwcvt_rtz_x_f_v_i32m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vfwcvt_x_f_v_i32m4 (vfloat16m2_t src, size_t vl) {
+  return vfwcvt_x_f_v_i32m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4 (vfloat16m2_t src, size_t vl) {
+  return vfwcvt_rtz_x_f_v_i32m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vfwcvt_x_f_v_i32m8 (vfloat16m4_t src, size_t vl) {
+  return vfwcvt_x_f_v_i32m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8 (vfloat16m4_t src, size_t vl) {
+  return vfwcvt_rtz_x_f_v_i32m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2 (vfloat16mf4_t src, size_t vl) {
+  return vfwcvt_xu_f_v_u32mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2 (vfloat16mf4_t src, size_t vl) {
+  return vfwcvt_rtz_xu_f_v_u32mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vfwcvt_xu_f_v_u32m1 (vfloat16mf2_t src, size_t vl) {
+  return vfwcvt_xu_f_v_u32m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1 (vfloat16mf2_t src, size_t vl) {
+  return vfwcvt_rtz_xu_f_v_u32m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vfwcvt_xu_f_v_u32m2 (vfloat16m1_t src, size_t vl) {
+  return vfwcvt_xu_f_v_u32m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2 (vfloat16m1_t src, size_t vl) {
+  return vfwcvt_rtz_xu_f_v_u32m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vfwcvt_xu_f_v_u32m4 (vfloat16m2_t src, size_t vl) {
+  return vfwcvt_xu_f_v_u32m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4 (vfloat16m2_t src, size_t vl) {
+  return vfwcvt_rtz_xu_f_v_u32m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vfwcvt_xu_f_v_u32m8 (vfloat16m4_t src, size_t vl) {
+  return vfwcvt_xu_f_v_u32m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8 (vfloat16m4_t src, size_t vl) {
+  return vfwcvt_rtz_xu_f_v_u32m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2 (vfloat16mf4_t src, size_t vl) {
+  return vfwcvt_f_f_v_f32mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_f32m1 (vfloat16mf2_t src, size_t vl) {
+  return vfwcvt_f_f_v_f32m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_f32m2 (vfloat16m1_t src, size_t vl) {
+  return vfwcvt_f_f_v_f32m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_f32m4 (vfloat16m2_t src, size_t vl) {
+  return vfwcvt_f_f_v_f32m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_f32m8 (vfloat16m4_t src, size_t vl) {
+  return vfwcvt_f_f_v_f32m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
+  return vfwcvt_f_x_v_f16mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
+  return vfwcvt_f_x_v_f16mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfwcvt_f_x_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) {
+  return vfwcvt_f_x_v_f16m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfwcvt_f_x_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) {
+  return vfwcvt_f_x_v_f16m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfwcvt_f_x_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) {
+  return vfwcvt_f_x_v_f16m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfwcvt_f_x_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) {
+  return vfwcvt_f_x_v_f16m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) {
+  return vfwcvt_f_xu_v_f16mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) {
+  return vfwcvt_f_xu_v_f16mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) {
+  return vfwcvt_f_xu_v_f16m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) {
+  return vfwcvt_f_xu_v_f16m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) {
+  return vfwcvt_f_xu_v_f16m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) {
+  return vfwcvt_f_xu_v_f16m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vfwcvt_x_f_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
+  return vfwcvt_x_f_v_i32mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
+  return vfwcvt_rtz_x_f_v_i32mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vfwcvt_x_f_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
+  return vfwcvt_x_f_v_i32m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
+  return vfwcvt_rtz_x_f_v_i32m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vfwcvt_x_f_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
+  return vfwcvt_x_f_v_i32m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
+  return vfwcvt_rtz_x_f_v_i32m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vfwcvt_x_f_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
+  return vfwcvt_x_f_v_i32m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
+  return vfwcvt_rtz_x_f_v_i32m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vfwcvt_x_f_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
+  return vfwcvt_x_f_v_i32m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
+  return vfwcvt_rtz_x_f_v_i32m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
+  return vfwcvt_xu_f_v_u32mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
+  return vfwcvt_rtz_xu_f_v_u32mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vfwcvt_xu_f_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
+  return vfwcvt_xu_f_v_u32m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
+  return vfwcvt_rtz_xu_f_v_u32m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vfwcvt_xu_f_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
+  return vfwcvt_xu_f_v_u32m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
+  return vfwcvt_rtz_xu_f_v_u32m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vfwcvt_xu_f_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
+  return vfwcvt_xu_f_v_u32m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
+  return vfwcvt_rtz_xu_f_v_u32m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vfwcvt_xu_f_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
+  return vfwcvt_xu_f_v_u32m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
+  return vfwcvt_rtz_xu_f_v_u32m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
+  return vfwcvt_f_f_v_f32mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
+  return vfwcvt_f_f_v_f32m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
+  return vfwcvt_f_f_v_f32m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
+  return vfwcvt_f_f_v_f32m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
+  return vfwcvt_f_f_v_f32m8_m(mask, maskedoff, src, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c
index 9874057f8b7a..2425a21f8ace 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -168,3 +169,183 @@ vfloat64m8_t test_vfwmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, float op1,
                                      vfloat32m4_t op2, size_t vl) {
   return vfwmacc_vf_f64m8_m(mask, acc, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_f32mf2 (vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfwmacc_vv_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_f32mf2 (vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfwmacc_vf_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_f32m1 (vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfwmacc_vv_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_f32m1 (vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfwmacc_vf_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_f32m2 (vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfwmacc_vv_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_f32m2 (vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfwmacc_vf_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_f32m4 (vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfwmacc_vv_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_f32m4 (vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfwmacc_vf_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_f32m8 (vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfwmacc_vv_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_f32m8 (vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfwmacc_vf_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfwmacc_vv_f32mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfwmacc_vf_f32mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfwmacc_vv_f32m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfwmacc_vf_f32m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfwmacc_vv_f32m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfwmacc_vf_f32m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfwmacc_vv_f32m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfwmacc_vf_f32m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfwmacc_vv_f32m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfwmacc_vf_f32m8_m(mask, vd, vs1, vs2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c
index b02a1feed219..347392b7ff03 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -168,3 +169,183 @@ vfloat64m8_t test_vfwmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, float op1,
                                      vfloat32m4_t op2, size_t vl) {
   return vfwmsac_vf_f64m8_m(mask, acc, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_f32mf2 (vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfwmsac_vv_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_f32mf2 (vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfwmsac_vf_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_f32m1 (vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfwmsac_vv_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_f32m1 (vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfwmsac_vf_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_f32m2 (vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfwmsac_vv_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_f32m2 (vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfwmsac_vf_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_f32m4 (vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfwmsac_vv_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_f32m4 (vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfwmsac_vf_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_f32m8 (vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfwmsac_vv_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_f32m8 (vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfwmsac_vf_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfwmsac_vv_f32mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfwmsac_vf_f32mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfwmsac_vv_f32m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfwmsac_vf_f32m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfwmsac_vv_f32m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfwmsac_vf_f32m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfwmsac_vv_f32m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfwmsac_vf_f32m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfwmsac_vv_f32m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfwmsac_vf_f32m8_m(mask, vd, vs1, vs2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c
index 2c7734983aa2..fe6b66862461 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -164,3 +165,183 @@ vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                     vfloat32m4_t op1, float op2, size_t vl) {
   return vfwmul_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfwmul_vv_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfwmul_vf_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfwmul_vv_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfwmul_vf_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfwmul_vv_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfwmul_vf_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfwmul_vv_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfwmul_vf_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfwmul_vv_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfwmul_vf_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfwmul_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfwmul_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfwmul_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfwmul_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfwmul_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfwmul_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfwmul_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfwmul_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfwmul_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfwmul_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c
index 1a1530d41056..6f5a2ba59c15 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -168,3 +169,183 @@ vfloat64m8_t test_vfwnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
                                       float op1, vfloat32m4_t op2, size_t vl) {
   return vfwnmacc_vf_f64m8_m(mask, acc, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_f32mf2 (vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfwnmacc_vv_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_f32mf2 (vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfwnmacc_vf_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_f32m1 (vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfwnmacc_vv_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_f32m1 (vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfwnmacc_vf_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_f32m2 (vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfwnmacc_vv_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_f32m2 (vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfwnmacc_vf_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_f32m4 (vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfwnmacc_vv_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_f32m4 (vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfwnmacc_vf_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_f32m8 (vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfwnmacc_vv_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_f32m8 (vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfwnmacc_vf_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfwnmacc_vv_f32mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfwnmacc_vf_f32mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfwnmacc_vv_f32m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfwnmacc_vf_f32m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfwnmacc_vv_f32m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfwnmacc_vf_f32m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfwnmacc_vv_f32m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfwnmacc_vf_f32m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfwnmacc_vv_f32m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfwnmacc_vf_f32m8_m(mask, vd, vs1, vs2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c
index ae2c8d108529..870ed4d3778a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -168,3 +169,183 @@ vfloat64m8_t test_vfwnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
                                       float op1, vfloat32m4_t op2, size_t vl) {
   return vfwnmsac_vf_f64m8_m(mask, acc, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_f32mf2 (vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfwnmsac_vv_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_f32mf2 (vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfwnmsac_vf_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_f32m1 (vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfwnmsac_vv_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_f32m1 (vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfwnmsac_vf_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_f32m2 (vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfwnmsac_vv_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_f32m2 (vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfwnmsac_vf_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_f32m4 (vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfwnmsac_vv_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_f32m4 (vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfwnmsac_vf_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_f32m8 (vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfwnmsac_vv_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_f32m8 (vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfwnmsac_vf_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfwnmsac_vv_f32mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
+  return vfwnmsac_vf_f32mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfwnmsac_vv_f32m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
+  return vfwnmsac_vf_f32m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfwnmsac_vv_f32m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
+  return vfwnmsac_vf_f32m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfwnmsac_vv_f32m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
+  return vfwnmsac_vf_f32m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfwnmsac_vv_f32m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {
+  return vfwnmsac_vf_f32m8_m(mask, vd, vs1, vs2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredosum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredosum.c
index 39078a1fe9a3..590cd741d155 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredosum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredosum.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -114,3 +115,111 @@ vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
                                               vfloat64m1_t scalar, size_t vl) {
   return vfwredosum_vs_f32m8_f64m1_m(mask, dst, vector, scalar, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv1f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1 (vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f16mf4_f32m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1 (vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f16mf2_f32m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv4f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1 (vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f16m1_f32m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv8f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1 (vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f16m2_f32m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv16f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1 (vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f16m4_f32m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv32f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1 (vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f16m8_f32m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_m (vbool64_t mask, vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f16mf4_f32m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_m (vbool32_t mask, vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f16mf2_f32m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m (vbool16_t mask, vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f16m1_f32m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m (vbool8_t mask, vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f16m2_f32m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m (vbool4_t mask, vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f16m4_f32m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m (vbool2_t mask, vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f16m8_f32m1_m(mask, dest, vector, scalar, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
index 89447ad8dc9f..aefde6483960 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -114,3 +115,111 @@ vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
                                              vfloat64m1_t scalar, size_t vl) {
   return vfwredsum_vs_f32m8_f64m1_m(mask, dst, vector, scalar, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16mf4_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv1f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredsum_vs_f16mf4_f32m1 (vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredsum_vs_f16mf4_f32m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16mf2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredsum_vs_f16mf2_f32m1 (vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredsum_vs_f16mf2_f32m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16m1_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv4f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredsum_vs_f16m1_f32m1 (vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredsum_vs_f16m1_f32m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16m2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv8f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredsum_vs_f16m2_f32m1 (vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredsum_vs_f16m2_f32m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16m4_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv16f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredsum_vs_f16m4_f32m1 (vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredsum_vs_f16m4_f32m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16m8_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv32f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredsum_vs_f16m8_f32m1 (vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredsum_vs_f16m8_f32m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16mf4_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv1f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredsum_vs_f16mf4_f32m1_m (vbool64_t mask, vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredsum_vs_f16mf4_f32m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16mf2_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredsum_vs_f16mf2_f32m1_m (vbool32_t mask, vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredsum_vs_f16mf2_f32m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16m1_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv4f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredsum_vs_f16m1_f32m1_m (vbool16_t mask, vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredsum_vs_f16m1_f32m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16m2_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv8f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredsum_vs_f16m2_f32m1_m (vbool8_t mask, vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredsum_vs_f16m2_f32m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16m4_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv16f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredsum_vs_f16m4_f32m1_m (vbool4_t mask, vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredsum_vs_f16m4_f32m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16m8_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredsum_vs_f16m8_f32m1_m (vbool2_t mask, vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredsum_vs_f16m8_f32m1_m(mask, dest, vector, scalar, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c
index 8eb6a199e149..95c1d9eb8abc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -324,3 +325,363 @@ vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                     vfloat64m8_t op1, float op2, size_t vl) {
   return vfwsub_wf_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfwsub_vv_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfwsub_vf_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_f32mf2 (vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfwsub_wv_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16.i64(<vscale x 1 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_f32mf2 (vfloat32mf2_t op1, _Float16 op2, size_t vl) {
+  return vfwsub_wf_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfwsub_vv_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfwsub_vf_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_f32m1 (vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfwsub_wv_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16.i64(<vscale x 2 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_f32m1 (vfloat32m1_t op1, _Float16 op2, size_t vl) {
+  return vfwsub_wf_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfwsub_vv_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfwsub_vf_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_f32m2 (vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfwsub_wv_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16.i64(<vscale x 4 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_f32m2 (vfloat32m2_t op1, _Float16 op2, size_t vl) {
+  return vfwsub_wf_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfwsub_vv_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfwsub_vf_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_f32m4 (vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfwsub_wv_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16.i64(<vscale x 8 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_f32m4 (vfloat32m4_t op1, _Float16 op2, size_t vl) {
+  return vfwsub_wf_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfwsub_vv_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfwsub_vf_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_f32m8 (vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfwsub_wv_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16.i64(<vscale x 16 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_f32m8 (vfloat32m8_t op1, _Float16 op2, size_t vl) {
+  return vfwsub_wf_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfwsub_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfwsub_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfwsub_wv_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.f16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) {
+  return vfwsub_wf_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfwsub_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfwsub_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfwsub_wv_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) {
+  return vfwsub_wf_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfwsub_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfwsub_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfwsub_wv_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) {
+  return vfwsub_wf_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfwsub_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfwsub_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfwsub_wv_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) {
+  return vfwsub_wf_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfwsub_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfwsub_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfwsub_wv_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) {
+  return vfwsub_wf_f32m8_m(mask, maskedoff, op1, op2, vl);
+}

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c
index 6cc41e9acc32..d56c6247cadd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c
@@ -1,8 +1,8 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
-// RUN:   -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s \
-// RUN:   -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
@@ -1384,3 +1384,158 @@ vfloat64m8_t test_vle64ff_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, cons
   return vle64ff_v_f64m8_m(mask, maskedoff, base, new_vl, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x half>, i64 } @llvm.riscv.vleff.nxv1f16.i64(<vscale x 1 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP2]]
+//
+vfloat16mf4_t test_vle16ff_v_f16mf4 (const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vle16ff_v_f16mf4 (base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x half>, i64 } @llvm.riscv.vleff.nxv2f16.i64(<vscale x 2 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP2]]
+//
+vfloat16mf2_t test_vle16ff_v_f16mf2 (const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vle16ff_v_f16mf2 (base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x half>, i64 } @llvm.riscv.vleff.nxv4f16.i64(<vscale x 4 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP2]]
+//
+vfloat16m1_t test_vle16ff_v_f16m1 (const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vle16ff_v_f16m1 (base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x half>, i64 } @llvm.riscv.vleff.nxv8f16.i64(<vscale x 8 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP2]]
+//
+vfloat16m2_t test_vle16ff_v_f16m2 (const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vle16ff_v_f16m2 (base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 16 x half>, i64 } @llvm.riscv.vleff.nxv16f16.i64(<vscale x 16 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP2]]
+//
+vfloat16m4_t test_vle16ff_v_f16m4 (const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vle16ff_v_f16m4 (base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 32 x half>, i64 } @llvm.riscv.vleff.nxv32f16.i64(<vscale x 32 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP2]]
+//
+vfloat16m8_t test_vle16ff_v_f16m8 (const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vle16ff_v_f16m8 (base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x half>, i64 } @llvm.riscv.vleff.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP2]]
+//
+vfloat16mf4_t test_vle16ff_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vle16ff_v_f16mf4_m (mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x half>, i64 } @llvm.riscv.vleff.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP2]]
+//
+vfloat16mf2_t test_vle16ff_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vle16ff_v_f16mf2_m (mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x half>, i64 } @llvm.riscv.vleff.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP2]]
+//
+vfloat16m1_t test_vle16ff_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vle16ff_v_f16m1_m (mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x half>, i64 } @llvm.riscv.vleff.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP2]]
+//
+vfloat16m2_t test_vle16ff_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vle16ff_v_f16m2_m (mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 16 x half>, i64 } @llvm.riscv.vleff.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP2]]
+//
+vfloat16m4_t test_vle16ff_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vle16ff_v_f16m4_m (mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 32 x half>, i64 } @llvm.riscv.vleff.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP2]]
+//
+vfloat16m8_t test_vle16ff_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vle16ff_v_f16m8_m (mask, maskedoff, base, new_vl, vl);
+}

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c
index e781c2380674..df71bad82217 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -2164,3 +2165,273 @@ vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) {
 vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t op1) {
   return vlmul_trunc_v_f64m8_f64m4(op1);
 }
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.experimental.vector.insert.nxv2f16.nxv1f16(<vscale x 2 x half> undef, <vscale x 1 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vlmul_ext_v_f16mf4_f16mf2 (vfloat16mf4_t op1) {
+  return vlmul_ext_v_f16mf4_f16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.experimental.vector.insert.nxv4f16.nxv1f16(<vscale x 4 x half> undef, <vscale x 1 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vlmul_ext_v_f16mf4_f16m1 (vfloat16mf4_t op1) {
+  return vlmul_ext_v_f16mf4_f16m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.nxv1f16(<vscale x 8 x half> undef, <vscale x 1 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vlmul_ext_v_f16mf4_f16m2 (vfloat16mf4_t op1) {
+  return vlmul_ext_v_f16mf4_f16m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.experimental.vector.insert.nxv16f16.nxv1f16(<vscale x 16 x half> undef, <vscale x 1 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vlmul_ext_v_f16mf4_f16m4 (vfloat16mf4_t op1) {
+  return vlmul_ext_v_f16mf4_f16m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv32f16.nxv1f16(<vscale x 32 x half> undef, <vscale x 1 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vlmul_ext_v_f16mf4_f16m8 (vfloat16mf4_t op1) {
+  return vlmul_ext_v_f16mf4_f16m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.experimental.vector.insert.nxv4f16.nxv2f16(<vscale x 4 x half> undef, <vscale x 2 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vlmul_ext_v_f16mf2_f16m1 (vfloat16mf2_t op1) {
+  return vlmul_ext_v_f16mf2_f16m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.nxv2f16(<vscale x 8 x half> undef, <vscale x 2 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vlmul_ext_v_f16mf2_f16m2 (vfloat16mf2_t op1) {
+  return vlmul_ext_v_f16mf2_f16m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.experimental.vector.insert.nxv16f16.nxv2f16(<vscale x 16 x half> undef, <vscale x 2 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vlmul_ext_v_f16mf2_f16m4 (vfloat16mf2_t op1) {
+  return vlmul_ext_v_f16mf2_f16m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv32f16.nxv2f16(<vscale x 32 x half> undef, <vscale x 2 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vlmul_ext_v_f16mf2_f16m8 (vfloat16mf2_t op1) {
+  return vlmul_ext_v_f16mf2_f16m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m1_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.insert.nxv8f16.nxv4f16(<vscale x 8 x half> undef, <vscale x 4 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vlmul_ext_v_f16m1_f16m2 (vfloat16m1_t op1) {
+  return vlmul_ext_v_f16m1_f16m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m1_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.experimental.vector.insert.nxv16f16.nxv4f16(<vscale x 16 x half> undef, <vscale x 4 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vlmul_ext_v_f16m1_f16m4 (vfloat16m1_t op1) {
+  return vlmul_ext_v_f16m1_f16m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m1_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv32f16.nxv4f16(<vscale x 32 x half> undef, <vscale x 4 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vlmul_ext_v_f16m1_f16m8 (vfloat16m1_t op1) {
+  return vlmul_ext_v_f16m1_f16m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m2_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.experimental.vector.insert.nxv16f16.nxv8f16(<vscale x 16 x half> undef, <vscale x 8 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vlmul_ext_v_f16m2_f16m4 (vfloat16m2_t op1) {
+  return vlmul_ext_v_f16m2_f16m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m2_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv32f16.nxv8f16(<vscale x 32 x half> undef, <vscale x 8 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vlmul_ext_v_f16m2_f16m8 (vfloat16m2_t op1) {
+  return vlmul_ext_v_f16m2_f16m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m4_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv32f16.nxv16f16(<vscale x 32 x half> undef, <vscale x 16 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vlmul_ext_v_f16m4_f16m8 (vfloat16m4_t op1) {
+  return vlmul_ext_v_f16m4_f16m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16mf2_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.experimental.vector.extract.nxv1f16.nxv2f16(<vscale x 2 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vlmul_trunc_v_f16mf2_f16mf4 (vfloat16mf2_t op1) {
+  return vlmul_trunc_v_f16mf2_f16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m1_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.experimental.vector.extract.nxv1f16.nxv4f16(<vscale x 4 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vlmul_trunc_v_f16m1_f16mf4 (vfloat16m1_t op1) {
+  return vlmul_trunc_v_f16m1_f16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m1_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.experimental.vector.extract.nxv2f16.nxv4f16(<vscale x 4 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vlmul_trunc_v_f16m1_f16mf2 (vfloat16m1_t op1) {
+  return vlmul_trunc_v_f16m1_f16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.experimental.vector.extract.nxv1f16.nxv8f16(<vscale x 8 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vlmul_trunc_v_f16m2_f16mf4 (vfloat16m2_t op1) {
+  return vlmul_trunc_v_f16m2_f16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.experimental.vector.extract.nxv2f16.nxv8f16(<vscale x 8 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vlmul_trunc_v_f16m2_f16mf2 (vfloat16m2_t op1) {
+  return vlmul_trunc_v_f16m2_f16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.experimental.vector.extract.nxv4f16.nxv8f16(<vscale x 8 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vlmul_trunc_v_f16m2_f16m1 (vfloat16m2_t op1) {
+  return vlmul_trunc_v_f16m2_f16m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.experimental.vector.extract.nxv1f16.nxv16f16(<vscale x 16 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vlmul_trunc_v_f16m4_f16mf4 (vfloat16m4_t op1) {
+  return vlmul_trunc_v_f16m4_f16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.experimental.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vlmul_trunc_v_f16m4_f16mf2 (vfloat16m4_t op1) {
+  return vlmul_trunc_v_f16m4_f16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.experimental.vector.extract.nxv4f16.nxv16f16(<vscale x 16 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vlmul_trunc_v_f16m4_f16m1 (vfloat16m4_t op1) {
+  return vlmul_trunc_v_f16m4_f16m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.extract.nxv8f16.nxv16f16(<vscale x 16 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vlmul_trunc_v_f16m4_f16m2 (vfloat16m4_t op1) {
+  return vlmul_trunc_v_f16m4_f16m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.experimental.vector.extract.nxv1f16.nxv32f16(<vscale x 32 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vlmul_trunc_v_f16m8_f16mf4 (vfloat16m8_t op1) {
+  return vlmul_trunc_v_f16m8_f16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.experimental.vector.extract.nxv2f16.nxv32f16(<vscale x 32 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vlmul_trunc_v_f16m8_f16mf2 (vfloat16m8_t op1) {
+  return vlmul_trunc_v_f16m8_f16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.experimental.vector.extract.nxv4f16.nxv32f16(<vscale x 32 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vlmul_trunc_v_f16m8_f16m1 (vfloat16m8_t op1) {
+  return vlmul_trunc_v_f16m8_f16m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.experimental.vector.extract.nxv8f16.nxv32f16(<vscale x 32 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vlmul_trunc_v_f16m8_f16m2 (vfloat16m8_t op1) {
+  return vlmul_trunc_v_f16m8_f16m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.experimental.vector.extract.nxv16f16.nxv32f16(<vscale x 32 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vlmul_trunc_v_f16m8_f16m4 (vfloat16m8_t op1) {
+  return vlmul_trunc_v_f16m8_f16m4(op1);
+}

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c
index 2948c337650a..132d855e3866 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -11,7 +12,7 @@
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vint8mf8_t testuxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+vint8mf8_t test_vloxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
   return vloxei8_v_i8mf8(base, bindex, vl);
 }
 
@@ -21,7 +22,7 @@ vint8mf8_t testuxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vint8mf4_t testuxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
+vint8mf4_t test_vloxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
   return vloxei8_v_i8mf4(base, bindex, vl);
 }
 
@@ -31,7 +32,7 @@ vint8mf4_t testuxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vint8mf2_t testuxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
+vint8mf2_t test_vloxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
   return vloxei8_v_i8mf2(base, bindex, vl);
 }
 
@@ -41,7 +42,7 @@ vint8mf2_t testuxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vint8m1_t testuxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) {
+vint8m1_t test_vloxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) {
   return vloxei8_v_i8m1(base, bindex, vl);
 }
 
@@ -51,7 +52,7 @@ vint8m1_t testuxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vint8m2_t testuxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
+vint8m2_t test_vloxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
   return vloxei8_v_i8m2(base, bindex, vl);
 }
 
@@ -61,7 +62,7 @@ vint8m2_t testuxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
-vint8m4_t testuxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) {
+vint8m4_t test_vloxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) {
   return vloxei8_v_i8m4(base, bindex, vl);
 }
 
@@ -71,7 +72,7 @@ vint8m4_t testuxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
-vint8m8_t testuxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) {
+vint8m8_t test_vloxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) {
   return vloxei8_v_i8m8(base, bindex, vl);
 }
 
@@ -81,7 +82,7 @@ vint8m8_t testuxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vint8mf8_t testuxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+vint8mf8_t test_vloxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) {
   return vloxei16_v_i8mf8(base, bindex, vl);
 }
 
@@ -91,7 +92,7 @@ vint8mf8_t testuxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vint8mf4_t testuxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) {
+vint8mf4_t test_vloxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) {
   return vloxei16_v_i8mf4(base, bindex, vl);
 }
 
@@ -101,7 +102,7 @@ vint8mf4_t testuxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vint8mf2_t testuxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) {
+vint8mf2_t test_vloxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) {
   return vloxei16_v_i8mf2(base, bindex, vl);
 }
 
@@ -111,7 +112,7 @@ vint8mf2_t testuxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vint8m1_t testuxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) {
+vint8m1_t test_vloxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) {
   return vloxei16_v_i8m1(base, bindex, vl);
 }
 
@@ -121,7 +122,7 @@ vint8m1_t testuxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vint8m2_t testuxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) {
+vint8m2_t test_vloxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) {
   return vloxei16_v_i8m2(base, bindex, vl);
 }
 
@@ -131,7 +132,7 @@ vint8m2_t testuxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
-vint8m4_t testuxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) {
+vint8m4_t test_vloxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) {
   return vloxei16_v_i8m4(base, bindex, vl);
 }
 
@@ -141,7 +142,7 @@ vint8m4_t testuxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vint8mf8_t testuxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+vint8mf8_t test_vloxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) {
   return vloxei32_v_i8mf8(base, bindex, vl);
 }
 
@@ -151,7 +152,7 @@ vint8mf8_t testuxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vint8mf4_t testuxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) {
+vint8mf4_t test_vloxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) {
   return vloxei32_v_i8mf4(base, bindex, vl);
 }
 
@@ -161,7 +162,7 @@ vint8mf4_t testuxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vint8mf2_t testuxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) {
+vint8mf2_t test_vloxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) {
   return vloxei32_v_i8mf2(base, bindex, vl);
 }
 
@@ -171,7 +172,7 @@ vint8mf2_t testuxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vint8m1_t testuxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) {
+vint8m1_t test_vloxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) {
   return vloxei32_v_i8m1(base, bindex, vl);
 }
 
@@ -181,7 +182,7 @@ vint8m1_t testuxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vint8m2_t testuxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) {
+vint8m2_t test_vloxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) {
   return vloxei32_v_i8m2(base, bindex, vl);
 }
 
@@ -191,7 +192,7 @@ vint8m2_t testuxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vint8mf8_t testuxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) {
+vint8mf8_t test_vloxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) {
   return vloxei64_v_i8mf8(base, bindex, vl);
 }
 
@@ -201,7 +202,7 @@ vint8mf8_t testuxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vint8mf4_t testuxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) {
+vint8mf4_t test_vloxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) {
   return vloxei64_v_i8mf4(base, bindex, vl);
 }
 
@@ -211,7 +212,7 @@ vint8mf4_t testuxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vint8mf2_t testuxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) {
+vint8mf2_t test_vloxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) {
   return vloxei64_v_i8mf2(base, bindex, vl);
 }
 
@@ -221,7 +222,7 @@ vint8mf2_t testuxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vint8m1_t testuxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) {
+vint8m1_t test_vloxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) {
   return vloxei64_v_i8m1(base, bindex, vl);
 }
 
@@ -231,7 +232,7 @@ vint8m1_t testuxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vint16mf4_t testuxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+vint16mf4_t test_vloxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
   return vloxei8_v_i16mf4(base, bindex, vl);
 }
 
@@ -241,7 +242,7 @@ vint16mf4_t testuxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vint16mf2_t testuxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+vint16mf2_t test_vloxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) {
   return vloxei8_v_i16mf2(base, bindex, vl);
 }
 
@@ -251,7 +252,7 @@ vint16mf2_t testuxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vint16m1_t testuxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) {
+vint16m1_t test_vloxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) {
   return vloxei8_v_i16m1(base, bindex, vl);
 }
 
@@ -261,7 +262,7 @@ vint16m1_t testuxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vint16m2_t testuxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) {
+vint16m2_t test_vloxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) {
   return vloxei8_v_i16m2(base, bindex, vl);
 }
 
@@ -271,7 +272,7 @@ vint16m2_t testuxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vint16m4_t testuxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) {
+vint16m4_t test_vloxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) {
   return vloxei8_v_i16m4(base, bindex, vl);
 }
 
@@ -281,7 +282,7 @@ vint16m4_t testuxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
-vint16m8_t testuxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) {
+vint16m8_t test_vloxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) {
   return vloxei8_v_i16m8(base, bindex, vl);
 }
 
@@ -291,7 +292,7 @@ vint16m8_t testuxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vint16mf4_t testuxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+vint16mf4_t test_vloxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) {
   return vloxei16_v_i16mf4(base, bindex, vl);
 }
 
@@ -301,7 +302,7 @@ vint16mf4_t testuxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vint16mf2_t testuxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+vint16mf2_t test_vloxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) {
   return vloxei16_v_i16mf2(base, bindex, vl);
 }
 
@@ -311,7 +312,7 @@ vint16mf2_t testuxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vint16m1_t testuxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) {
+vint16m1_t test_vloxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) {
   return vloxei16_v_i16m1(base, bindex, vl);
 }
 
@@ -321,7 +322,7 @@ vint16m1_t testuxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vint16m2_t testuxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) {
+vint16m2_t test_vloxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) {
   return vloxei16_v_i16m2(base, bindex, vl);
 }
 
@@ -331,7 +332,7 @@ vint16m2_t testuxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vint16m4_t testuxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) {
+vint16m4_t test_vloxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) {
   return vloxei16_v_i16m4(base, bindex, vl);
 }
 
@@ -341,7 +342,7 @@ vint16m4_t testuxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
-vint16m8_t testuxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) {
+vint16m8_t test_vloxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) {
   return vloxei16_v_i16m8(base, bindex, vl);
 }
 
@@ -351,7 +352,7 @@ vint16m8_t testuxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vint16mf4_t testuxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+vint16mf4_t test_vloxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) {
   return vloxei32_v_i16mf4(base, bindex, vl);
 }
 
@@ -361,7 +362,7 @@ vint16mf4_t testuxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vint16mf2_t testuxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) {
+vint16mf2_t test_vloxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) {
   return vloxei32_v_i16mf2(base, bindex, vl);
 }
 
@@ -371,7 +372,7 @@ vint16mf2_t testuxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vint16m1_t testuxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) {
+vint16m1_t test_vloxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) {
   return vloxei32_v_i16m1(base, bindex, vl);
 }
 
@@ -381,7 +382,7 @@ vint16m1_t testuxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vint16m2_t testuxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) {
+vint16m2_t test_vloxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) {
   return vloxei32_v_i16m2(base, bindex, vl);
 }
 
@@ -391,7 +392,7 @@ vint16m2_t testuxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vint16m4_t testuxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) {
+vint16m4_t test_vloxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) {
   return vloxei32_v_i16m4(base, bindex, vl);
 }
 
@@ -401,7 +402,7 @@ vint16m4_t testuxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vint16mf4_t testuxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) {
+vint16mf4_t test_vloxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) {
   return vloxei64_v_i16mf4(base, bindex, vl);
 }
 
@@ -411,7 +412,7 @@ vint16mf4_t testuxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vint16mf2_t testuxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) {
+vint16mf2_t test_vloxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) {
   return vloxei64_v_i16mf2(base, bindex, vl);
 }
 
@@ -421,7 +422,7 @@ vint16mf2_t testuxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vint16m1_t testuxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) {
+vint16m1_t test_vloxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) {
   return vloxei64_v_i16m1(base, bindex, vl);
 }
 
@@ -431,7 +432,7 @@ vint16m1_t testuxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vint16m2_t testuxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) {
+vint16m2_t test_vloxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) {
   return vloxei64_v_i16m2(base, bindex, vl);
 }
 
@@ -441,7 +442,7 @@ vint16m2_t testuxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vint32mf2_t testuxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+vint32mf2_t test_vloxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
   return vloxei8_v_i32mf2(base, bindex, vl);
 }
 
@@ -451,7 +452,7 @@ vint32mf2_t testuxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vint32m1_t testuxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+vint32m1_t test_vloxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
   return vloxei8_v_i32m1(base, bindex, vl);
 }
 
@@ -461,7 +462,7 @@ vint32m1_t testuxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vint32m2_t testuxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) {
+vint32m2_t test_vloxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) {
   return vloxei8_v_i32m2(base, bindex, vl);
 }
 
@@ -471,7 +472,7 @@ vint32m2_t testuxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vint32m4_t testuxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) {
+vint32m4_t test_vloxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) {
   return vloxei8_v_i32m4(base, bindex, vl);
 }
 
@@ -481,7 +482,7 @@ vint32m4_t testuxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vint32m8_t testuxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) {
+vint32m8_t test_vloxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) {
   return vloxei8_v_i32m8(base, bindex, vl);
 }
 
@@ -491,7 +492,7 @@ vint32m8_t testuxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vint32mf2_t testuxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+vint32mf2_t test_vloxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) {
   return vloxei16_v_i32mf2(base, bindex, vl);
 }
 
@@ -501,7 +502,7 @@ vint32mf2_t testuxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vint32m1_t testuxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
+vint32m1_t test_vloxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
   return vloxei16_v_i32m1(base, bindex, vl);
 }
 
@@ -511,7 +512,7 @@ vint32m1_t testuxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vint32m2_t testuxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) {
+vint32m2_t test_vloxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) {
   return vloxei16_v_i32m2(base, bindex, vl);
 }
 
@@ -521,7 +522,7 @@ vint32m2_t testuxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vint32m4_t testuxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) {
+vint32m4_t test_vloxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) {
   return vloxei16_v_i32m4(base, bindex, vl);
 }
 
@@ -531,7 +532,7 @@ vint32m4_t testuxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vint32m8_t testuxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) {
+vint32m8_t test_vloxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) {
   return vloxei16_v_i32m8(base, bindex, vl);
 }
 
@@ -541,7 +542,7 @@ vint32m8_t testuxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vint32mf2_t testuxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+vint32mf2_t test_vloxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
   return vloxei32_v_i32mf2(base, bindex, vl);
 }
 
@@ -551,7 +552,7 @@ vint32mf2_t testuxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vint32m1_t testuxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) {
+vint32m1_t test_vloxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) {
   return vloxei32_v_i32m1(base, bindex, vl);
 }
 
@@ -561,7 +562,7 @@ vint32m1_t testuxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vint32m2_t testuxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) {
+vint32m2_t test_vloxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) {
   return vloxei32_v_i32m2(base, bindex, vl);
 }
 
@@ -571,7 +572,7 @@ vint32m2_t testuxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vint32m4_t testuxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) {
+vint32m4_t test_vloxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) {
   return vloxei32_v_i32m4(base, bindex, vl);
 }
 
@@ -581,7 +582,7 @@ vint32m4_t testuxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vint32m8_t testuxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) {
+vint32m8_t test_vloxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) {
   return vloxei32_v_i32m8(base, bindex, vl);
 }
 
@@ -591,7 +592,7 @@ vint32m8_t testuxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vint32mf2_t testuxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) {
+vint32mf2_t test_vloxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) {
   return vloxei64_v_i32mf2(base, bindex, vl);
 }
 
@@ -601,7 +602,7 @@ vint32mf2_t testuxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vint32m1_t testuxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) {
+vint32m1_t test_vloxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) {
   return vloxei64_v_i32m1(base, bindex, vl);
 }
 
@@ -611,7 +612,7 @@ vint32m1_t testuxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vint32m2_t testuxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) {
+vint32m2_t test_vloxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) {
   return vloxei64_v_i32m2(base, bindex, vl);
 }
 
@@ -621,7 +622,7 @@ vint32m2_t testuxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vint32m4_t testuxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) {
+vint32m4_t test_vloxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) {
   return vloxei64_v_i32m4(base, bindex, vl);
 }
 
@@ -631,7 +632,7 @@ vint32m4_t testuxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vint64m1_t testuxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
+vint64m1_t test_vloxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
   return vloxei8_v_i64m1(base, bindex, vl);
 }
 
@@ -641,7 +642,7 @@ vint64m1_t testuxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vint64m2_t testuxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) {
+vint64m2_t test_vloxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) {
   return vloxei8_v_i64m2(base, bindex, vl);
 }
 
@@ -651,7 +652,7 @@ vint64m2_t testuxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vint64m4_t testuxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) {
+vint64m4_t test_vloxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) {
   return vloxei8_v_i64m4(base, bindex, vl);
 }
 
@@ -661,7 +662,7 @@ vint64m4_t testuxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vint64m8_t testuxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) {
+vint64m8_t test_vloxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) {
   return vloxei8_v_i64m8(base, bindex, vl);
 }
 
@@ -671,7 +672,7 @@ vint64m8_t testuxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vint64m1_t testuxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
+vint64m1_t test_vloxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
   return vloxei16_v_i64m1(base, bindex, vl);
 }
 
@@ -681,7 +682,7 @@ vint64m1_t testuxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vint64m2_t testuxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) {
+vint64m2_t test_vloxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) {
   return vloxei16_v_i64m2(base, bindex, vl);
 }
 
@@ -691,7 +692,7 @@ vint64m2_t testuxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vint64m4_t testuxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) {
+vint64m4_t test_vloxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) {
   return vloxei16_v_i64m4(base, bindex, vl);
 }
 
@@ -701,7 +702,7 @@ vint64m4_t testuxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vint64m8_t testuxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) {
+vint64m8_t test_vloxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) {
   return vloxei16_v_i64m8(base, bindex, vl);
 }
 
@@ -711,7 +712,7 @@ vint64m8_t testuxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vint64m1_t testuxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
+vint64m1_t test_vloxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
   return vloxei32_v_i64m1(base, bindex, vl);
 }
 
@@ -721,7 +722,7 @@ vint64m1_t testuxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vint64m2_t testuxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) {
+vint64m2_t test_vloxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) {
   return vloxei32_v_i64m2(base, bindex, vl);
 }
 
@@ -731,7 +732,7 @@ vint64m2_t testuxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vint64m4_t testuxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) {
+vint64m4_t test_vloxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) {
   return vloxei32_v_i64m4(base, bindex, vl);
 }
 
@@ -741,7 +742,7 @@ vint64m4_t testuxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vint64m8_t testuxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) {
+vint64m8_t test_vloxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) {
   return vloxei32_v_i64m8(base, bindex, vl);
 }
 
@@ -751,7 +752,7 @@ vint64m8_t testuxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vint64m1_t testuxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) {
+vint64m1_t test_vloxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) {
   return vloxei64_v_i64m1(base, bindex, vl);
 }
 
@@ -761,7 +762,7 @@ vint64m1_t testuxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vint64m2_t testuxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) {
+vint64m2_t test_vloxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) {
   return vloxei64_v_i64m2(base, bindex, vl);
 }
 
@@ -771,7 +772,7 @@ vint64m2_t testuxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vint64m4_t testuxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) {
+vint64m4_t test_vloxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) {
   return vloxei64_v_i64m4(base, bindex, vl);
 }
 
@@ -781,7 +782,7 @@ vint64m4_t testuxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vint64m8_t testuxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) {
+vint64m8_t test_vloxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) {
   return vloxei64_v_i64m8(base, bindex, vl);
 }
 
@@ -791,7 +792,7 @@ vint64m8_t testuxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vuint8mf8_t testuxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+vuint8mf8_t test_vloxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
   return vloxei8_v_u8mf8(base, bindex, vl);
 }
 
@@ -801,7 +802,7 @@ vuint8mf8_t testuxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vuint8mf4_t testuxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+vuint8mf4_t test_vloxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
   return vloxei8_v_u8mf4(base, bindex, vl);
 }
 
@@ -811,7 +812,7 @@ vuint8mf4_t testuxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vuint8mf2_t testuxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+vuint8mf2_t test_vloxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
   return vloxei8_v_u8mf2(base, bindex, vl);
 }
 
@@ -821,7 +822,7 @@ vuint8mf2_t testuxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vuint8m1_t testuxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
+vuint8m1_t test_vloxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
   return vloxei8_v_u8m1(base, bindex, vl);
 }
 
@@ -831,7 +832,7 @@ vuint8m1_t testuxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vuint8m2_t testuxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) {
+vuint8m2_t test_vloxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) {
   return vloxei8_v_u8m2(base, bindex, vl);
 }
 
@@ -841,7 +842,7 @@ vuint8m2_t testuxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
-vuint8m4_t testuxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) {
+vuint8m4_t test_vloxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) {
   return vloxei8_v_u8m4(base, bindex, vl);
 }
 
@@ -851,7 +852,7 @@ vuint8m4_t testuxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
-vuint8m8_t testuxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) {
+vuint8m8_t test_vloxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) {
   return vloxei8_v_u8m8(base, bindex, vl);
 }
 
@@ -861,7 +862,7 @@ vuint8m8_t testuxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vuint8mf8_t testuxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+vuint8mf8_t test_vloxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
   return vloxei16_v_u8mf8(base, bindex, vl);
 }
 
@@ -871,7 +872,7 @@ vuint8mf8_t testuxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vuint8mf4_t testuxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+vuint8mf4_t test_vloxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
   return vloxei16_v_u8mf4(base, bindex, vl);
 }
 
@@ -881,7 +882,7 @@ vuint8mf4_t testuxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vuint8mf2_t testuxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+vuint8mf2_t test_vloxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) {
   return vloxei16_v_u8mf2(base, bindex, vl);
 }
 
@@ -891,7 +892,7 @@ vuint8mf2_t testuxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vuint8m1_t testuxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) {
+vuint8m1_t test_vloxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) {
   return vloxei16_v_u8m1(base, bindex, vl);
 }
 
@@ -901,7 +902,7 @@ vuint8m1_t testuxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vuint8m2_t testuxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) {
+vuint8m2_t test_vloxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) {
   return vloxei16_v_u8m2(base, bindex, vl);
 }
 
@@ -911,7 +912,7 @@ vuint8m2_t testuxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
-vuint8m4_t testuxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) {
+vuint8m4_t test_vloxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) {
   return vloxei16_v_u8m4(base, bindex, vl);
 }
 
@@ -921,7 +922,7 @@ vuint8m4_t testuxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vuint8mf8_t testuxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+vuint8mf8_t test_vloxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
   return vloxei32_v_u8mf8(base, bindex, vl);
 }
 
@@ -931,7 +932,7 @@ vuint8mf8_t testuxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vuint8mf4_t testuxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+vuint8mf4_t test_vloxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) {
   return vloxei32_v_u8mf4(base, bindex, vl);
 }
 
@@ -941,7 +942,7 @@ vuint8mf4_t testuxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vuint8mf2_t testuxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+vuint8mf2_t test_vloxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) {
   return vloxei32_v_u8mf2(base, bindex, vl);
 }
 
@@ -951,7 +952,7 @@ vuint8mf2_t testuxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vuint8m1_t testuxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+vuint8m1_t test_vloxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) {
   return vloxei32_v_u8m1(base, bindex, vl);
 }
 
@@ -961,7 +962,7 @@ vuint8m1_t testuxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vuint8m2_t testuxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+vuint8m2_t test_vloxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) {
   return vloxei32_v_u8m2(base, bindex, vl);
 }
 
@@ -971,7 +972,7 @@ vuint8m2_t testuxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vuint8mf8_t testuxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+vuint8mf8_t test_vloxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) {
   return vloxei64_v_u8mf8(base, bindex, vl);
 }
 
@@ -981,7 +982,7 @@ vuint8mf8_t testuxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vuint8mf4_t testuxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+vuint8mf4_t test_vloxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) {
   return vloxei64_v_u8mf4(base, bindex, vl);
 }
 
@@ -991,7 +992,7 @@ vuint8mf4_t testuxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vuint8mf2_t testuxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+vuint8mf2_t test_vloxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) {
   return vloxei64_v_u8mf2(base, bindex, vl);
 }
 
@@ -1001,7 +1002,7 @@ vuint8mf2_t testuxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vuint8m1_t testuxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) {
+vuint8m1_t test_vloxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) {
   return vloxei64_v_u8m1(base, bindex, vl);
 }
 
@@ -1011,7 +1012,7 @@ vuint8m1_t testuxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vuint16mf4_t testuxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+vuint16mf4_t test_vloxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
   return vloxei8_v_u16mf4(base, bindex, vl);
 }
 
@@ -1021,7 +1022,7 @@ vuint16mf4_t testuxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vuint16mf2_t testuxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+vuint16mf2_t test_vloxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
   return vloxei8_v_u16mf2(base, bindex, vl);
 }
 
@@ -1031,7 +1032,7 @@ vuint16mf2_t testuxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vuint16m1_t testuxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
+vuint16m1_t test_vloxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
   return vloxei8_v_u16m1(base, bindex, vl);
 }
 
@@ -1041,7 +1042,7 @@ vuint16m1_t testuxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vuint16m2_t testuxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) {
+vuint16m2_t test_vloxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) {
   return vloxei8_v_u16m2(base, bindex, vl);
 }
 
@@ -1051,7 +1052,7 @@ vuint16m2_t testuxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vuint16m4_t testuxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) {
+vuint16m4_t test_vloxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) {
   return vloxei8_v_u16m4(base, bindex, vl);
 }
 
@@ -1061,7 +1062,7 @@ vuint16m4_t testuxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
-vuint16m8_t testuxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) {
+vuint16m8_t test_vloxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) {
   return vloxei8_v_u16m8(base, bindex, vl);
 }
 
@@ -1071,7 +1072,7 @@ vuint16m8_t testuxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vuint16mf4_t testuxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+vuint16mf4_t test_vloxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
   return vloxei16_v_u16mf4(base, bindex, vl);
 }
 
@@ -1081,7 +1082,7 @@ vuint16mf4_t testuxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vuint16mf2_t testuxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+vuint16mf2_t test_vloxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
   return vloxei16_v_u16mf2(base, bindex, vl);
 }
 
@@ -1091,7 +1092,7 @@ vuint16mf2_t testuxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vuint16m1_t testuxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) {
+vuint16m1_t test_vloxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) {
   return vloxei16_v_u16m1(base, bindex, vl);
 }
 
@@ -1101,7 +1102,7 @@ vuint16m1_t testuxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vuint16m2_t testuxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) {
+vuint16m2_t test_vloxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) {
   return vloxei16_v_u16m2(base, bindex, vl);
 }
 
@@ -1111,7 +1112,7 @@ vuint16m2_t testuxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vuint16m4_t testuxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) {
+vuint16m4_t test_vloxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) {
   return vloxei16_v_u16m4(base, bindex, vl);
 }
 
@@ -1121,7 +1122,7 @@ vuint16m4_t testuxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
-vuint16m8_t testuxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) {
+vuint16m8_t test_vloxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) {
   return vloxei16_v_u16m8(base, bindex, vl);
 }
 
@@ -1131,7 +1132,7 @@ vuint16m8_t testuxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vuint16mf4_t testuxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+vuint16mf4_t test_vloxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
   return vloxei32_v_u16mf4(base, bindex, vl);
 }
 
@@ -1141,7 +1142,7 @@ vuint16mf4_t testuxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vuint16mf2_t testuxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+vuint16mf2_t test_vloxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) {
   return vloxei32_v_u16mf2(base, bindex, vl);
 }
 
@@ -1151,7 +1152,7 @@ vuint16mf2_t testuxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vuint16m1_t testuxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) {
+vuint16m1_t test_vloxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) {
   return vloxei32_v_u16m1(base, bindex, vl);
 }
 
@@ -1161,7 +1162,7 @@ vuint16m1_t testuxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vuint16m2_t testuxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) {
+vuint16m2_t test_vloxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) {
   return vloxei32_v_u16m2(base, bindex, vl);
 }
 
@@ -1171,7 +1172,7 @@ vuint16m2_t testuxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vuint16m4_t testuxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) {
+vuint16m4_t test_vloxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) {
   return vloxei32_v_u16m4(base, bindex, vl);
 }
 
@@ -1181,7 +1182,7 @@ vuint16m4_t testuxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vuint16mf4_t testuxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+vuint16mf4_t test_vloxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
   return vloxei64_v_u16mf4(base, bindex, vl);
 }
 
@@ -1191,7 +1192,7 @@ vuint16mf4_t testuxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vuint16mf2_t testuxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+vuint16mf2_t test_vloxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
   return vloxei64_v_u16mf2(base, bindex, vl);
 }
 
@@ -1201,7 +1202,7 @@ vuint16mf2_t testuxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vuint16m1_t testuxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
+vuint16m1_t test_vloxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
   return vloxei64_v_u16m1(base, bindex, vl);
 }
 
@@ -1211,7 +1212,7 @@ vuint16m1_t testuxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vuint16m2_t testuxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) {
+vuint16m2_t test_vloxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) {
   return vloxei64_v_u16m2(base, bindex, vl);
 }
 
@@ -1221,7 +1222,7 @@ vuint16m2_t testuxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vuint32mf2_t testuxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+vuint32mf2_t test_vloxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
   return vloxei8_v_u32mf2(base, bindex, vl);
 }
 
@@ -1231,7 +1232,7 @@ vuint32mf2_t testuxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vuint32m1_t testuxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+vuint32m1_t test_vloxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
   return vloxei8_v_u32m1(base, bindex, vl);
 }
 
@@ -1241,7 +1242,7 @@ vuint32m1_t testuxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vuint32m2_t testuxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
+vuint32m2_t test_vloxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
   return vloxei8_v_u32m2(base, bindex, vl);
 }
 
@@ -1251,7 +1252,7 @@ vuint32m2_t testuxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vuint32m4_t testuxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) {
+vuint32m4_t test_vloxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) {
   return vloxei8_v_u32m4(base, bindex, vl);
 }
 
@@ -1261,7 +1262,7 @@ vuint32m4_t testuxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vuint32m8_t testuxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) {
+vuint32m8_t test_vloxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) {
   return vloxei8_v_u32m8(base, bindex, vl);
 }
 
@@ -1271,7 +1272,7 @@ vuint32m8_t testuxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vuint32mf2_t testuxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+vuint32mf2_t test_vloxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
   return vloxei16_v_u32mf2(base, bindex, vl);
 }
 
@@ -1281,7 +1282,7 @@ vuint32mf2_t testuxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vuint32m1_t testuxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
+vuint32m1_t test_vloxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
   return vloxei16_v_u32m1(base, bindex, vl);
 }
 
@@ -1291,7 +1292,7 @@ vuint32m1_t testuxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vuint32m2_t testuxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) {
+vuint32m2_t test_vloxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) {
   return vloxei16_v_u32m2(base, bindex, vl);
 }
 
@@ -1301,7 +1302,7 @@ vuint32m2_t testuxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vuint32m4_t testuxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) {
+vuint32m4_t test_vloxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) {
   return vloxei16_v_u32m4(base, bindex, vl);
 }
 
@@ -1311,7 +1312,7 @@ vuint32m4_t testuxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vuint32m8_t testuxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) {
+vuint32m8_t test_vloxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) {
   return vloxei16_v_u32m8(base, bindex, vl);
 }
 
@@ -1321,7 +1322,7 @@ vuint32m8_t testuxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vuint32mf2_t testuxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+vuint32mf2_t test_vloxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
   return vloxei32_v_u32mf2(base, bindex, vl);
 }
 
@@ -1331,7 +1332,7 @@ vuint32mf2_t testuxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vuint32m1_t testuxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
+vuint32m1_t test_vloxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
   return vloxei32_v_u32m1(base, bindex, vl);
 }
 
@@ -1341,7 +1342,7 @@ vuint32m1_t testuxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vuint32m2_t testuxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) {
+vuint32m2_t test_vloxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) {
   return vloxei32_v_u32m2(base, bindex, vl);
 }
 
@@ -1351,7 +1352,7 @@ vuint32m2_t testuxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vuint32m4_t testuxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) {
+vuint32m4_t test_vloxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) {
   return vloxei32_v_u32m4(base, bindex, vl);
 }
 
@@ -1361,7 +1362,7 @@ vuint32m4_t testuxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vuint32m8_t testuxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) {
+vuint32m8_t test_vloxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) {
   return vloxei32_v_u32m8(base, bindex, vl);
 }
 
@@ -1371,7 +1372,7 @@ vuint32m8_t testuxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vuint32mf2_t testuxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+vuint32mf2_t test_vloxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
   return vloxei64_v_u32mf2(base, bindex, vl);
 }
 
@@ -1381,7 +1382,7 @@ vuint32mf2_t testuxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vuint32m1_t testuxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
+vuint32m1_t test_vloxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
   return vloxei64_v_u32m1(base, bindex, vl);
 }
 
@@ -1391,7 +1392,7 @@ vuint32m1_t testuxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vuint32m2_t testuxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) {
+vuint32m2_t test_vloxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) {
   return vloxei64_v_u32m2(base, bindex, vl);
 }
 
@@ -1401,7 +1402,7 @@ vuint32m2_t testuxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vuint32m4_t testuxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) {
+vuint32m4_t test_vloxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) {
   return vloxei64_v_u32m4(base, bindex, vl);
 }
 
@@ -1411,7 +1412,7 @@ vuint32m4_t testuxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vuint64m1_t testuxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
+vuint64m1_t test_vloxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
   return vloxei8_v_u64m1(base, bindex, vl);
 }
 
@@ -1421,7 +1422,7 @@ vuint64m1_t testuxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vuint64m2_t testuxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
+vuint64m2_t test_vloxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
   return vloxei8_v_u64m2(base, bindex, vl);
 }
 
@@ -1431,7 +1432,7 @@ vuint64m2_t testuxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vuint64m4_t testuxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
+vuint64m4_t test_vloxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
   return vloxei8_v_u64m4(base, bindex, vl);
 }
 
@@ -1441,7 +1442,7 @@ vuint64m4_t testuxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vuint64m8_t testuxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) {
+vuint64m8_t test_vloxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) {
   return vloxei8_v_u64m8(base, bindex, vl);
 }
 
@@ -1451,7 +1452,7 @@ vuint64m8_t testuxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vuint64m1_t testuxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
+vuint64m1_t test_vloxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
   return vloxei16_v_u64m1(base, bindex, vl);
 }
 
@@ -1461,7 +1462,7 @@ vuint64m1_t testuxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vuint64m2_t testuxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
+vuint64m2_t test_vloxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
   return vloxei16_v_u64m2(base, bindex, vl);
 }
 
@@ -1471,7 +1472,7 @@ vuint64m2_t testuxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vuint64m4_t testuxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) {
+vuint64m4_t test_vloxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) {
   return vloxei16_v_u64m4(base, bindex, vl);
 }
 
@@ -1481,7 +1482,7 @@ vuint64m4_t testuxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vuint64m8_t testuxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) {
+vuint64m8_t test_vloxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) {
   return vloxei16_v_u64m8(base, bindex, vl);
 }
 
@@ -1491,7 +1492,7 @@ vuint64m8_t testuxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vuint64m1_t testuxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
+vuint64m1_t test_vloxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
   return vloxei32_v_u64m1(base, bindex, vl);
 }
 
@@ -1501,7 +1502,7 @@ vuint64m1_t testuxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vuint64m2_t testuxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) {
+vuint64m2_t test_vloxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) {
   return vloxei32_v_u64m2(base, bindex, vl);
 }
 
@@ -1511,7 +1512,7 @@ vuint64m2_t testuxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vuint64m4_t testuxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) {
+vuint64m4_t test_vloxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) {
   return vloxei32_v_u64m4(base, bindex, vl);
 }
 
@@ -1521,7 +1522,7 @@ vuint64m4_t testuxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vuint64m8_t testuxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) {
+vuint64m8_t test_vloxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) {
   return vloxei32_v_u64m8(base, bindex, vl);
 }
 
@@ -1531,7 +1532,7 @@ vuint64m8_t testuxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vuint64m1_t testuxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
+vuint64m1_t test_vloxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
   return vloxei64_v_u64m1(base, bindex, vl);
 }
 
@@ -1541,7 +1542,7 @@ vuint64m1_t testuxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vuint64m2_t testuxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) {
+vuint64m2_t test_vloxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) {
   return vloxei64_v_u64m2(base, bindex, vl);
 }
 
@@ -1551,7 +1552,7 @@ vuint64m2_t testuxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vuint64m4_t testuxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) {
+vuint64m4_t test_vloxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) {
   return vloxei64_v_u64m4(base, bindex, vl);
 }
 
@@ -1561,7 +1562,7 @@ vuint64m4_t testuxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vuint64m8_t testuxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) {
+vuint64m8_t test_vloxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) {
   return vloxei64_v_u64m8(base, bindex, vl);
 }
 
@@ -1571,7 +1572,7 @@ vuint64m8_t testuxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
-vfloat32mf2_t testuxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) {
+vfloat32mf2_t test_vloxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) {
   return vloxei8_v_f32mf2(base, bindex, vl);
 }
 
@@ -1581,7 +1582,7 @@ vfloat32mf2_t testuxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
-vfloat32m1_t testuxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) {
+vfloat32m1_t test_vloxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) {
   return vloxei8_v_f32m1(base, bindex, vl);
 }
 
@@ -1591,7 +1592,7 @@ vfloat32m1_t testuxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
-vfloat32m2_t testuxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) {
+vfloat32m2_t test_vloxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) {
   return vloxei8_v_f32m2(base, bindex, vl);
 }
 
@@ -1601,7 +1602,7 @@ vfloat32m2_t testuxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
-vfloat32m4_t testuxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) {
+vfloat32m4_t test_vloxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) {
   return vloxei8_v_f32m4(base, bindex, vl);
 }
 
@@ -1611,7 +1612,7 @@ vfloat32m4_t testuxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
-vfloat32m8_t testuxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) {
+vfloat32m8_t test_vloxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) {
   return vloxei8_v_f32m8(base, bindex, vl);
 }
 
@@ -1621,7 +1622,7 @@ vfloat32m8_t testuxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
-vfloat32mf2_t testuxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) {
+vfloat32mf2_t test_vloxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) {
   return vloxei16_v_f32mf2(base, bindex, vl);
 }
 
@@ -1631,7 +1632,7 @@ vfloat32mf2_t testuxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
-vfloat32m1_t testuxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) {
+vfloat32m1_t test_vloxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) {
   return vloxei16_v_f32m1(base, bindex, vl);
 }
 
@@ -1641,7 +1642,7 @@ vfloat32m1_t testuxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
-vfloat32m2_t testuxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) {
+vfloat32m2_t test_vloxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) {
   return vloxei16_v_f32m2(base, bindex, vl);
 }
 
@@ -1651,7 +1652,7 @@ vfloat32m2_t testuxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
-vfloat32m4_t testuxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) {
+vfloat32m4_t test_vloxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) {
   return vloxei16_v_f32m4(base, bindex, vl);
 }
 
@@ -1661,7 +1662,7 @@ vfloat32m4_t testuxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
-vfloat32m8_t testuxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) {
+vfloat32m8_t test_vloxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) {
   return vloxei16_v_f32m8(base, bindex, vl);
 }
 
@@ -1671,7 +1672,7 @@ vfloat32m8_t testuxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
-vfloat32mf2_t testuxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) {
+vfloat32mf2_t test_vloxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) {
   return vloxei32_v_f32mf2(base, bindex, vl);
 }
 
@@ -1681,7 +1682,7 @@ vfloat32mf2_t testuxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
-vfloat32m1_t testuxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) {
+vfloat32m1_t test_vloxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) {
   return vloxei32_v_f32m1(base, bindex, vl);
 }
 
@@ -1691,7 +1692,7 @@ vfloat32m1_t testuxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
-vfloat32m2_t testuxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) {
+vfloat32m2_t test_vloxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) {
   return vloxei32_v_f32m2(base, bindex, vl);
 }
 
@@ -1701,7 +1702,7 @@ vfloat32m2_t testuxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
-vfloat32m4_t testuxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) {
+vfloat32m4_t test_vloxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) {
   return vloxei32_v_f32m4(base, bindex, vl);
 }
 
@@ -1711,7 +1712,7 @@ vfloat32m4_t testuxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
-vfloat32m8_t testuxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) {
+vfloat32m8_t test_vloxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) {
   return vloxei32_v_f32m8(base, bindex, vl);
 }
 
@@ -1721,7 +1722,7 @@ vfloat32m8_t testuxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
-vfloat32mf2_t testuxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) {
+vfloat32mf2_t test_vloxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) {
   return vloxei64_v_f32mf2(base, bindex, vl);
 }
 
@@ -1731,7 +1732,7 @@ vfloat32mf2_t testuxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
-vfloat32m1_t testuxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) {
+vfloat32m1_t test_vloxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) {
   return vloxei64_v_f32m1(base, bindex, vl);
 }
 
@@ -1741,7 +1742,7 @@ vfloat32m1_t testuxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
-vfloat32m2_t testuxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) {
+vfloat32m2_t test_vloxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) {
   return vloxei64_v_f32m2(base, bindex, vl);
 }
 
@@ -1751,7 +1752,7 @@ vfloat32m2_t testuxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
-vfloat32m4_t testuxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) {
+vfloat32m4_t test_vloxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) {
   return vloxei64_v_f32m4(base, bindex, vl);
 }
 
@@ -1761,7 +1762,7 @@ vfloat32m4_t testuxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
-vfloat64m1_t testuxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) {
+vfloat64m1_t test_vloxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) {
   return vloxei8_v_f64m1(base, bindex, vl);
 }
 
@@ -1771,7 +1772,7 @@ vfloat64m1_t testuxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
-vfloat64m2_t testuxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) {
+vfloat64m2_t test_vloxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) {
   return vloxei8_v_f64m2(base, bindex, vl);
 }
 
@@ -1781,7 +1782,7 @@ vfloat64m2_t testuxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
-vfloat64m4_t testuxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) {
+vfloat64m4_t test_vloxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) {
   return vloxei8_v_f64m4(base, bindex, vl);
 }
 
@@ -1791,7 +1792,7 @@ vfloat64m4_t testuxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
-vfloat64m8_t testuxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) {
+vfloat64m8_t test_vloxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) {
   return vloxei8_v_f64m8(base, bindex, vl);
 }
 
@@ -1801,7 +1802,7 @@ vfloat64m8_t testuxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
-vfloat64m1_t testuxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) {
+vfloat64m1_t test_vloxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) {
   return vloxei16_v_f64m1(base, bindex, vl);
 }
 
@@ -1811,7 +1812,7 @@ vfloat64m1_t testuxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
-vfloat64m2_t testuxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) {
+vfloat64m2_t test_vloxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) {
   return vloxei16_v_f64m2(base, bindex, vl);
 }
 
@@ -1821,7 +1822,7 @@ vfloat64m2_t testuxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
-vfloat64m4_t testuxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) {
+vfloat64m4_t test_vloxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) {
   return vloxei16_v_f64m4(base, bindex, vl);
 }
 
@@ -1831,7 +1832,7 @@ vfloat64m4_t testuxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
-vfloat64m8_t testuxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) {
+vfloat64m8_t test_vloxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) {
   return vloxei16_v_f64m8(base, bindex, vl);
 }
 
@@ -1841,7 +1842,7 @@ vfloat64m8_t testuxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
-vfloat64m1_t testuxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) {
+vfloat64m1_t test_vloxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) {
   return vloxei32_v_f64m1(base, bindex, vl);
 }
 
@@ -1851,7 +1852,7 @@ vfloat64m1_t testuxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
-vfloat64m2_t testuxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) {
+vfloat64m2_t test_vloxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) {
   return vloxei32_v_f64m2(base, bindex, vl);
 }
 
@@ -1861,7 +1862,7 @@ vfloat64m2_t testuxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
-vfloat64m4_t testuxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) {
+vfloat64m4_t test_vloxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) {
   return vloxei32_v_f64m4(base, bindex, vl);
 }
 
@@ -1871,7 +1872,7 @@ vfloat64m4_t testuxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
-vfloat64m8_t testuxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) {
+vfloat64m8_t test_vloxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) {
   return vloxei32_v_f64m8(base, bindex, vl);
 }
 
@@ -1881,7 +1882,7 @@ vfloat64m8_t testuxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
-vfloat64m1_t testuxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) {
+vfloat64m1_t test_vloxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) {
   return vloxei64_v_f64m1(base, bindex, vl);
 }
 
@@ -1891,7 +1892,7 @@ vfloat64m1_t testuxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
-vfloat64m2_t testuxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) {
+vfloat64m2_t test_vloxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) {
   return vloxei64_v_f64m2(base, bindex, vl);
 }
 
@@ -1901,7 +1902,7 @@ vfloat64m2_t testuxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
-vfloat64m4_t testuxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) {
+vfloat64m4_t test_vloxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) {
   return vloxei64_v_f64m4(base, bindex, vl);
 }
 
@@ -1911,7 +1912,7 @@ vfloat64m4_t testuxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
-vfloat64m8_t testuxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) {
+vfloat64m8_t test_vloxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) {
   return vloxei64_v_f64m8(base, bindex, vl);
 }
 
@@ -1921,7 +1922,7 @@ vfloat64m8_t testuxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vint8mf8_t testuxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+vint8mf8_t test_vloxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
   return vloxei8_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -1931,7 +1932,7 @@ vint8mf8_t testuxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vint8mf4_t testuxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
+vint8mf4_t test_vloxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
   return vloxei8_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -1941,7 +1942,7 @@ vint8mf4_t testuxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vint8mf2_t testuxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
+vint8mf2_t test_vloxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
   return vloxei8_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -1951,7 +1952,7 @@ vint8mf2_t testuxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vint8m1_t testuxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) {
+vint8m1_t test_vloxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) {
   return vloxei8_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -1961,7 +1962,7 @@ vint8m1_t testuxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *b
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vint8m2_t testuxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) {
+vint8m2_t test_vloxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) {
   return vloxei8_v_i8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -1971,7 +1972,7 @@ vint8m2_t testuxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *b
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
-vint8m4_t testuxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) {
+vint8m4_t test_vloxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) {
   return vloxei8_v_i8m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -1981,7 +1982,7 @@ vint8m4_t testuxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *b
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
-vint8m8_t testuxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) {
+vint8m8_t test_vloxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) {
   return vloxei8_v_i8m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -1991,7 +1992,7 @@ vint8m8_t testuxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *b
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vint8mf8_t testuxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+vint8mf8_t test_vloxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
   return vloxei16_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2001,7 +2002,7 @@ vint8mf8_t testuxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vint8mf4_t testuxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
+vint8mf4_t test_vloxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
   return vloxei16_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2011,7 +2012,7 @@ vint8mf4_t testuxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vint8mf2_t testuxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) {
+vint8mf2_t test_vloxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) {
   return vloxei16_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2021,7 +2022,7 @@ vint8mf2_t testuxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vint8m1_t testuxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) {
+vint8m1_t test_vloxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) {
   return vloxei16_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2031,7 +2032,7 @@ vint8m1_t testuxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vint8m2_t testuxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
+vint8m2_t test_vloxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
   return vloxei16_v_i8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2041,7 +2042,7 @@ vint8m2_t testuxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
-vint8m4_t testuxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
+vint8m4_t test_vloxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
   return vloxei16_v_i8m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2051,7 +2052,7 @@ vint8m4_t testuxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vint8mf8_t testuxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+vint8mf8_t test_vloxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
   return vloxei32_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2061,7 +2062,7 @@ vint8mf8_t testuxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vint8mf4_t testuxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+vint8mf4_t test_vloxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
   return vloxei32_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2071,7 +2072,7 @@ vint8mf4_t testuxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vint8mf2_t testuxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+vint8mf2_t test_vloxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
   return vloxei32_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2081,7 +2082,7 @@ vint8mf2_t testuxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vint8m1_t testuxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+vint8m1_t test_vloxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
   return vloxei32_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2091,7 +2092,7 @@ vint8m1_t testuxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vint8m2_t testuxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
+vint8m2_t test_vloxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
   return vloxei32_v_i8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2101,7 +2102,7 @@ vint8m2_t testuxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vint8mf8_t testuxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+vint8mf8_t test_vloxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
   return vloxei64_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2111,7 +2112,7 @@ vint8mf8_t testuxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vint8mf4_t testuxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+vint8mf4_t test_vloxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
   return vloxei64_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2121,7 +2122,7 @@ vint8mf4_t testuxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vint8mf2_t testuxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+vint8mf2_t test_vloxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
   return vloxei64_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2131,7 +2132,7 @@ vint8mf2_t testuxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vint8m1_t testuxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
+vint8m1_t test_vloxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
   return vloxei64_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2141,7 +2142,7 @@ vint8m1_t testuxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vint16mf4_t testuxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+vint16mf4_t test_vloxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
   return vloxei8_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2151,7 +2152,7 @@ vint16mf4_t testuxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const in
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vint16mf2_t testuxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+vint16mf2_t test_vloxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
   return vloxei8_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2161,7 +2162,7 @@ vint16mf2_t testuxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const in
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vint16m1_t testuxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
+vint16m1_t test_vloxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
   return vloxei8_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2171,7 +2172,7 @@ vint16m1_t testuxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vint16m2_t testuxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
+vint16m2_t test_vloxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
   return vloxei8_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2181,7 +2182,7 @@ vint16m2_t testuxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vint16m4_t testuxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
+vint16m4_t test_vloxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
   return vloxei8_v_i16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2191,7 +2192,7 @@ vint16m4_t testuxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
-vint16m8_t testuxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
+vint16m8_t test_vloxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
   return vloxei8_v_i16m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2201,7 +2202,7 @@ vint16m8_t testuxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vint16mf4_t testuxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+vint16mf4_t test_vloxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
   return vloxei16_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2211,7 +2212,7 @@ vint16mf4_t testuxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const i
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vint16mf2_t testuxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+vint16mf2_t test_vloxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
   return vloxei16_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2221,7 +2222,7 @@ vint16mf2_t testuxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const i
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vint16m1_t testuxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
+vint16m1_t test_vloxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
   return vloxei16_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2231,7 +2232,7 @@ vint16m1_t testuxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int1
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vint16m2_t testuxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
+vint16m2_t test_vloxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
   return vloxei16_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2241,7 +2242,7 @@ vint16m2_t testuxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vint16m4_t testuxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
+vint16m4_t test_vloxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
   return vloxei16_v_i16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2251,7 +2252,7 @@ vint16m4_t testuxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
-vint16m8_t testuxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
+vint16m8_t test_vloxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
   return vloxei16_v_i16m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2261,7 +2262,7 @@ vint16m8_t testuxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vint16mf4_t testuxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+vint16mf4_t test_vloxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
   return vloxei32_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2271,7 +2272,7 @@ vint16mf4_t testuxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const i
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vint16mf2_t testuxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+vint16mf2_t test_vloxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
   return vloxei32_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2281,7 +2282,7 @@ vint16mf2_t testuxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const i
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vint16m1_t testuxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
+vint16m1_t test_vloxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
   return vloxei32_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2291,7 +2292,7 @@ vint16m1_t testuxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int1
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vint16m2_t testuxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
+vint16m2_t test_vloxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
   return vloxei32_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2301,7 +2302,7 @@ vint16m2_t testuxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vint16m4_t testuxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
+vint16m4_t test_vloxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
   return vloxei32_v_i16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2311,7 +2312,7 @@ vint16m4_t testuxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vint16mf4_t testuxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+vint16mf4_t test_vloxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
   return vloxei64_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2321,7 +2322,7 @@ vint16mf4_t testuxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const i
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vint16mf2_t testuxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+vint16mf2_t test_vloxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
   return vloxei64_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2331,7 +2332,7 @@ vint16mf2_t testuxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const i
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vint16m1_t testuxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+vint16m1_t test_vloxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
   return vloxei64_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2341,7 +2342,7 @@ vint16m1_t testuxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int1
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vint16m2_t testuxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
+vint16m2_t test_vloxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
   return vloxei64_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2351,7 +2352,7 @@ vint16m2_t testuxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vint32mf2_t testuxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+vint32mf2_t test_vloxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
   return vloxei8_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2361,7 +2362,7 @@ vint32mf2_t testuxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const in
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vint32m1_t testuxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+vint32m1_t test_vloxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
   return vloxei8_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2371,7 +2372,7 @@ vint32m1_t testuxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vint32m2_t testuxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
+vint32m2_t test_vloxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
   return vloxei8_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2381,7 +2382,7 @@ vint32m2_t testuxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vint32m4_t testuxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
+vint32m4_t test_vloxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
   return vloxei8_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2391,7 +2392,7 @@ vint32m4_t testuxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vint32m8_t testuxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
+vint32m8_t test_vloxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
   return vloxei8_v_i32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2401,7 +2402,7 @@ vint32m8_t testuxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vint32mf2_t testuxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+vint32mf2_t test_vloxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
   return vloxei16_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2411,7 +2412,7 @@ vint32mf2_t testuxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const i
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vint32m1_t testuxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
+vint32m1_t test_vloxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
   return vloxei16_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2421,7 +2422,7 @@ vint32m1_t testuxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int3
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vint32m2_t testuxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
+vint32m2_t test_vloxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
   return vloxei16_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2431,7 +2432,7 @@ vint32m2_t testuxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int3
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vint32m4_t testuxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
+vint32m4_t test_vloxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
   return vloxei16_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2441,7 +2442,7 @@ vint32m4_t testuxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vint32m8_t testuxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
+vint32m8_t test_vloxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
   return vloxei16_v_i32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2451,7 +2452,7 @@ vint32m8_t testuxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vint32mf2_t testuxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+vint32mf2_t test_vloxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
   return vloxei32_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2461,7 +2462,7 @@ vint32mf2_t testuxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const i
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vint32m1_t testuxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
+vint32m1_t test_vloxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
   return vloxei32_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2471,7 +2472,7 @@ vint32m1_t testuxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int3
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vint32m2_t testuxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
+vint32m2_t test_vloxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
   return vloxei32_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2481,7 +2482,7 @@ vint32m2_t testuxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int3
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vint32m4_t testuxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
+vint32m4_t test_vloxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
   return vloxei32_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2491,7 +2492,7 @@ vint32m4_t testuxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vint32m8_t testuxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
+vint32m8_t test_vloxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
   return vloxei32_v_i32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2501,7 +2502,7 @@ vint32m8_t testuxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vint32mf2_t testuxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+vint32mf2_t test_vloxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
   return vloxei64_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2511,7 +2512,7 @@ vint32mf2_t testuxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const i
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vint32m1_t testuxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
+vint32m1_t test_vloxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
   return vloxei64_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2521,7 +2522,7 @@ vint32m1_t testuxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int3
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vint32m2_t testuxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
+vint32m2_t test_vloxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
   return vloxei64_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2531,7 +2532,7 @@ vint32m2_t testuxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int3
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vint32m4_t testuxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
+vint32m4_t test_vloxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
   return vloxei64_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2541,7 +2542,7 @@ vint32m4_t testuxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vint64m1_t testuxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
+vint64m1_t test_vloxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
   return vloxei8_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2551,7 +2552,7 @@ vint64m1_t testuxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vint64m2_t testuxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
+vint64m2_t test_vloxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
   return vloxei8_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2561,7 +2562,7 @@ vint64m2_t testuxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vint64m4_t testuxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
+vint64m4_t test_vloxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
   return vloxei8_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2571,7 +2572,7 @@ vint64m4_t testuxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vint64m8_t testuxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
+vint64m8_t test_vloxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
   return vloxei8_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2581,7 +2582,7 @@ vint64m8_t testuxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vint64m1_t testuxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
+vint64m1_t test_vloxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
   return vloxei16_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2591,7 +2592,7 @@ vint64m1_t testuxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int6
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vint64m2_t testuxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
+vint64m2_t test_vloxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
   return vloxei16_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2601,7 +2602,7 @@ vint64m2_t testuxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int6
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vint64m4_t testuxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
+vint64m4_t test_vloxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
   return vloxei16_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2611,7 +2612,7 @@ vint64m4_t testuxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int6
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vint64m8_t testuxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
+vint64m8_t test_vloxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
   return vloxei16_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2621,7 +2622,7 @@ vint64m8_t testuxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vint64m1_t testuxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
+vint64m1_t test_vloxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
   return vloxei32_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2631,7 +2632,7 @@ vint64m1_t testuxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int6
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vint64m2_t testuxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
+vint64m2_t test_vloxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
   return vloxei32_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2641,7 +2642,7 @@ vint64m2_t testuxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int6
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vint64m4_t testuxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
+vint64m4_t test_vloxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
   return vloxei32_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2651,7 +2652,7 @@ vint64m4_t testuxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int6
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vint64m8_t testuxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
+vint64m8_t test_vloxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
   return vloxei32_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2661,7 +2662,7 @@ vint64m8_t testuxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vint64m1_t testuxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
+vint64m1_t test_vloxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
   return vloxei64_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2671,7 +2672,7 @@ vint64m1_t testuxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int6
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vint64m2_t testuxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
+vint64m2_t test_vloxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
   return vloxei64_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2681,7 +2682,7 @@ vint64m2_t testuxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int6
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vint64m4_t testuxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
+vint64m4_t test_vloxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
   return vloxei64_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2691,7 +2692,7 @@ vint64m4_t testuxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int6
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vint64m8_t testuxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
+vint64m8_t test_vloxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
   return vloxei64_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2701,7 +2702,7 @@ vint64m8_t testuxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vuint8mf8_t testuxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+vuint8mf8_t test_vloxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
   return vloxei8_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2711,7 +2712,7 @@ vuint8mf8_t testuxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vuint8mf4_t testuxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+vuint8mf4_t test_vloxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
   return vloxei8_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2721,7 +2722,7 @@ vuint8mf4_t testuxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vuint8mf2_t testuxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+vuint8mf2_t test_vloxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
   return vloxei8_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2731,7 +2732,7 @@ vuint8mf2_t testuxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vuint8m1_t testuxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
+vuint8m1_t test_vloxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
   return vloxei8_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2741,7 +2742,7 @@ vuint8m1_t testuxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vuint8m2_t testuxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
+vuint8m2_t test_vloxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
   return vloxei8_v_u8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2751,7 +2752,7 @@ vuint8m2_t testuxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
-vuint8m4_t testuxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
+vuint8m4_t test_vloxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
   return vloxei8_v_u8m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2761,7 +2762,7 @@ vuint8m4_t testuxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
-vuint8m8_t testuxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
+vuint8m8_t test_vloxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
   return vloxei8_v_u8m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2771,7 +2772,7 @@ vuint8m8_t testuxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vuint8mf8_t testuxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+vuint8mf8_t test_vloxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
   return vloxei16_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2781,7 +2782,7 @@ vuint8mf8_t testuxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vuint8mf4_t testuxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+vuint8mf4_t test_vloxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
   return vloxei16_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2791,7 +2792,7 @@ vuint8mf4_t testuxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vuint8mf2_t testuxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+vuint8mf2_t test_vloxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
   return vloxei16_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2801,7 +2802,7 @@ vuint8mf2_t testuxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vuint8m1_t testuxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
+vuint8m1_t test_vloxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
   return vloxei16_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2811,7 +2812,7 @@ vuint8m1_t testuxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vuint8m2_t testuxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
+vuint8m2_t test_vloxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
   return vloxei16_v_u8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2821,7 +2822,7 @@ vuint8m2_t testuxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
-vuint8m4_t testuxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
+vuint8m4_t test_vloxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
   return vloxei16_v_u8m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2831,7 +2832,7 @@ vuint8m4_t testuxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vuint8mf8_t testuxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+vuint8mf8_t test_vloxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
   return vloxei32_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2841,7 +2842,7 @@ vuint8mf8_t testuxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vuint8mf4_t testuxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+vuint8mf4_t test_vloxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
   return vloxei32_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2851,7 +2852,7 @@ vuint8mf4_t testuxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vuint8mf2_t testuxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+vuint8mf2_t test_vloxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
   return vloxei32_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2861,7 +2862,7 @@ vuint8mf2_t testuxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vuint8m1_t testuxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+vuint8m1_t test_vloxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
   return vloxei32_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2871,7 +2872,7 @@ vuint8m1_t testuxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vuint8m2_t testuxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+vuint8m2_t test_vloxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
   return vloxei32_v_u8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2881,7 +2882,7 @@ vuint8m2_t testuxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vuint8mf8_t testuxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+vuint8mf8_t test_vloxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
   return vloxei64_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2891,7 +2892,7 @@ vuint8mf8_t testuxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vuint8mf4_t testuxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+vuint8mf4_t test_vloxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
   return vloxei64_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2901,7 +2902,7 @@ vuint8mf4_t testuxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vuint8mf2_t testuxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+vuint8mf2_t test_vloxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
   return vloxei64_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2911,7 +2912,7 @@ vuint8mf2_t testuxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vuint8m1_t testuxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
+vuint8m1_t test_vloxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
   return vloxei64_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2921,7 +2922,7 @@ vuint8m1_t testuxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vuint16mf4_t testuxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+vuint16mf4_t test_vloxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
   return vloxei8_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2931,7 +2932,7 @@ vuint16mf4_t testuxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vuint16mf2_t testuxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+vuint16mf2_t test_vloxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
   return vloxei8_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2941,7 +2942,7 @@ vuint16mf2_t testuxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vuint16m1_t testuxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
+vuint16m1_t test_vloxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
   return vloxei8_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2951,7 +2952,7 @@ vuint16m1_t testuxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vuint16m2_t testuxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
+vuint16m2_t test_vloxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
   return vloxei8_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2961,7 +2962,7 @@ vuint16m2_t testuxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vuint16m4_t testuxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
+vuint16m4_t test_vloxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
   return vloxei8_v_u16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2971,7 +2972,7 @@ vuint16m4_t testuxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
-vuint16m8_t testuxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
+vuint16m8_t test_vloxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
   return vloxei8_v_u16m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2981,7 +2982,7 @@ vuint16m8_t testuxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vuint16mf4_t testuxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+vuint16mf4_t test_vloxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
   return vloxei16_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2991,7 +2992,7 @@ vuint16mf4_t testuxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vuint16mf2_t testuxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+vuint16mf2_t test_vloxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
   return vloxei16_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3001,7 +3002,7 @@ vuint16mf2_t testuxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vuint16m1_t testuxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
+vuint16m1_t test_vloxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
   return vloxei16_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3011,7 +3012,7 @@ vuint16m1_t testuxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vuint16m2_t testuxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
+vuint16m2_t test_vloxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
   return vloxei16_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3021,7 +3022,7 @@ vuint16m2_t testuxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vuint16m4_t testuxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
+vuint16m4_t test_vloxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
   return vloxei16_v_u16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3031,7 +3032,7 @@ vuint16m4_t testuxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
-vuint16m8_t testuxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
+vuint16m8_t test_vloxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
   return vloxei16_v_u16m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3041,7 +3042,7 @@ vuint16m8_t testuxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vuint16mf4_t testuxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+vuint16mf4_t test_vloxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
   return vloxei32_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3051,7 +3052,7 @@ vuint16mf4_t testuxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vuint16mf2_t testuxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+vuint16mf2_t test_vloxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
   return vloxei32_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3061,7 +3062,7 @@ vuint16mf2_t testuxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vuint16m1_t testuxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
+vuint16m1_t test_vloxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
   return vloxei32_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3071,7 +3072,7 @@ vuint16m1_t testuxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vuint16m2_t testuxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
+vuint16m2_t test_vloxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
   return vloxei32_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3081,7 +3082,7 @@ vuint16m2_t testuxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vuint16m4_t testuxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
+vuint16m4_t test_vloxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
   return vloxei32_v_u16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3091,7 +3092,7 @@ vuint16m4_t testuxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vuint16mf4_t testuxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+vuint16mf4_t test_vloxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
   return vloxei64_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3101,7 +3102,7 @@ vuint16mf4_t testuxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vuint16mf2_t testuxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+vuint16mf2_t test_vloxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
   return vloxei64_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3111,7 +3112,7 @@ vuint16mf2_t testuxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vuint16m1_t testuxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
+vuint16m1_t test_vloxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
   return vloxei64_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3121,7 +3122,7 @@ vuint16m1_t testuxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vuint16m2_t testuxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
+vuint16m2_t test_vloxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
   return vloxei64_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3131,7 +3132,7 @@ vuint16m2_t testuxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vuint32mf2_t testuxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+vuint32mf2_t test_vloxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
   return vloxei8_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3141,7 +3142,7 @@ vuint32mf2_t testuxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vuint32m1_t testuxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+vuint32m1_t test_vloxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
   return vloxei8_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3151,7 +3152,7 @@ vuint32m1_t testuxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vuint32m2_t testuxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
+vuint32m2_t test_vloxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
   return vloxei8_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3161,7 +3162,7 @@ vuint32m2_t testuxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vuint32m4_t testuxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
+vuint32m4_t test_vloxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
   return vloxei8_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3171,7 +3172,7 @@ vuint32m4_t testuxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vuint32m8_t testuxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
+vuint32m8_t test_vloxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
   return vloxei8_v_u32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3181,7 +3182,7 @@ vuint32m8_t testuxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vuint32mf2_t testuxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+vuint32mf2_t test_vloxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
   return vloxei16_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3191,7 +3192,7 @@ vuint32mf2_t testuxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vuint32m1_t testuxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
+vuint32m1_t test_vloxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
   return vloxei16_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3201,7 +3202,7 @@ vuint32m1_t testuxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vuint32m2_t testuxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
+vuint32m2_t test_vloxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
   return vloxei16_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3211,7 +3212,7 @@ vuint32m2_t testuxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vuint32m4_t testuxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
+vuint32m4_t test_vloxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
   return vloxei16_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3221,7 +3222,7 @@ vuint32m4_t testuxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vuint32m8_t testuxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
+vuint32m8_t test_vloxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
   return vloxei16_v_u32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3231,7 +3232,7 @@ vuint32m8_t testuxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vuint32mf2_t testuxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+vuint32mf2_t test_vloxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
   return vloxei32_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3241,7 +3242,7 @@ vuint32mf2_t testuxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vuint32m1_t testuxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
+vuint32m1_t test_vloxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
   return vloxei32_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3251,7 +3252,7 @@ vuint32m1_t testuxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vuint32m2_t testuxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
+vuint32m2_t test_vloxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
   return vloxei32_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3261,7 +3262,7 @@ vuint32m2_t testuxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vuint32m4_t testuxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
+vuint32m4_t test_vloxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
   return vloxei32_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3271,7 +3272,7 @@ vuint32m4_t testuxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vuint32m8_t testuxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
+vuint32m8_t test_vloxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
   return vloxei32_v_u32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3281,7 +3282,7 @@ vuint32m8_t testuxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vuint32mf2_t testuxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+vuint32mf2_t test_vloxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
   return vloxei64_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3291,7 +3292,7 @@ vuint32mf2_t testuxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vuint32m1_t testuxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
+vuint32m1_t test_vloxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
   return vloxei64_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3301,7 +3302,7 @@ vuint32m1_t testuxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vuint32m2_t testuxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
+vuint32m2_t test_vloxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
   return vloxei64_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3311,7 +3312,7 @@ vuint32m2_t testuxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vuint32m4_t testuxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
+vuint32m4_t test_vloxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
   return vloxei64_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3321,7 +3322,7 @@ vuint32m4_t testuxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vuint64m1_t testuxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
+vuint64m1_t test_vloxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
   return vloxei8_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3331,7 +3332,7 @@ vuint64m1_t testuxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vuint64m2_t testuxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
+vuint64m2_t test_vloxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
   return vloxei8_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3341,7 +3342,7 @@ vuint64m2_t testuxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vuint64m4_t testuxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
+vuint64m4_t test_vloxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
   return vloxei8_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3351,7 +3352,7 @@ vuint64m4_t testuxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vuint64m8_t testuxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
+vuint64m8_t test_vloxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
   return vloxei8_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3361,7 +3362,7 @@ vuint64m8_t testuxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vuint64m1_t testuxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
+vuint64m1_t test_vloxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
   return vloxei16_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3371,7 +3372,7 @@ vuint64m1_t testuxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vuint64m2_t testuxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
+vuint64m2_t test_vloxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
   return vloxei16_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3381,7 +3382,7 @@ vuint64m2_t testuxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vuint64m4_t testuxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
+vuint64m4_t test_vloxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
   return vloxei16_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3391,7 +3392,7 @@ vuint64m4_t testuxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vuint64m8_t testuxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
+vuint64m8_t test_vloxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
   return vloxei16_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3401,7 +3402,7 @@ vuint64m8_t testuxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vuint64m1_t testuxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
+vuint64m1_t test_vloxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
   return vloxei32_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3411,7 +3412,7 @@ vuint64m1_t testuxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vuint64m2_t testuxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
+vuint64m2_t test_vloxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
   return vloxei32_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3421,7 +3422,7 @@ vuint64m2_t testuxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vuint64m4_t testuxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
+vuint64m4_t test_vloxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
   return vloxei32_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3431,7 +3432,7 @@ vuint64m4_t testuxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vuint64m8_t testuxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
+vuint64m8_t test_vloxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
   return vloxei32_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3441,7 +3442,7 @@ vuint64m8_t testuxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vuint64m1_t testuxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
+vuint64m1_t test_vloxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
   return vloxei64_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3451,7 +3452,7 @@ vuint64m1_t testuxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vuint64m2_t testuxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
+vuint64m2_t test_vloxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
   return vloxei64_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3461,7 +3462,7 @@ vuint64m2_t testuxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vuint64m4_t testuxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
+vuint64m4_t test_vloxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
   return vloxei64_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3471,7 +3472,7 @@ vuint64m4_t testuxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vuint64m8_t testuxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
+vuint64m8_t test_vloxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
   return vloxei64_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3481,7 +3482,7 @@ vuint64m8_t testuxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
-vfloat32mf2_t testuxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) {
+vfloat32mf2_t test_vloxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) {
   return vloxei8_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3491,7 +3492,7 @@ vfloat32mf2_t testuxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, cons
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
-vfloat32m1_t testuxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) {
+vfloat32m1_t test_vloxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) {
   return vloxei8_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3501,7 +3502,7 @@ vfloat32m1_t testuxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const f
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
-vfloat32m2_t testuxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) {
+vfloat32m2_t test_vloxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) {
   return vloxei8_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3511,7 +3512,7 @@ vfloat32m2_t testuxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const f
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
-vfloat32m4_t testuxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) {
+vfloat32m4_t test_vloxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) {
   return vloxei8_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3521,7 +3522,7 @@ vfloat32m4_t testuxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const fl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
-vfloat32m8_t testuxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) {
+vfloat32m8_t test_vloxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) {
   return vloxei8_v_f32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3531,7 +3532,7 @@ vfloat32m8_t testuxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const fl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
-vfloat32mf2_t testuxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) {
+vfloat32mf2_t test_vloxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) {
   return vloxei16_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3541,7 +3542,7 @@ vfloat32mf2_t testuxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, con
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
-vfloat32m1_t testuxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) {
+vfloat32m1_t test_vloxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) {
   return vloxei16_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3551,7 +3552,7 @@ vfloat32m1_t testuxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
-vfloat32m2_t testuxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) {
+vfloat32m2_t test_vloxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) {
   return vloxei16_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3561,7 +3562,7 @@ vfloat32m2_t testuxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
-vfloat32m4_t testuxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) {
+vfloat32m4_t test_vloxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) {
   return vloxei16_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3571,7 +3572,7 @@ vfloat32m4_t testuxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const f
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
-vfloat32m8_t testuxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) {
+vfloat32m8_t test_vloxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) {
   return vloxei16_v_f32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3581,7 +3582,7 @@ vfloat32m8_t testuxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const f
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
-vfloat32mf2_t testuxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) {
+vfloat32mf2_t test_vloxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) {
   return vloxei32_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3591,7 +3592,7 @@ vfloat32mf2_t testuxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, con
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
-vfloat32m1_t testuxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) {
+vfloat32m1_t test_vloxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) {
   return vloxei32_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3601,7 +3602,7 @@ vfloat32m1_t testuxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
-vfloat32m2_t testuxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) {
+vfloat32m2_t test_vloxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) {
   return vloxei32_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3611,7 +3612,7 @@ vfloat32m2_t testuxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
-vfloat32m4_t testuxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) {
+vfloat32m4_t test_vloxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) {
   return vloxei32_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3621,7 +3622,7 @@ vfloat32m4_t testuxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const f
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
-vfloat32m8_t testuxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) {
+vfloat32m8_t test_vloxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) {
   return vloxei32_v_f32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3631,7 +3632,7 @@ vfloat32m8_t testuxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const f
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
-vfloat32mf2_t testuxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) {
+vfloat32mf2_t test_vloxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) {
   return vloxei64_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3641,7 +3642,7 @@ vfloat32mf2_t testuxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, con
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
-vfloat32m1_t testuxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) {
+vfloat32m1_t test_vloxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) {
   return vloxei64_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3651,7 +3652,7 @@ vfloat32m1_t testuxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
-vfloat32m2_t testuxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) {
+vfloat32m2_t test_vloxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) {
   return vloxei64_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3661,7 +3662,7 @@ vfloat32m2_t testuxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
-vfloat32m4_t testuxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) {
+vfloat32m4_t test_vloxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) {
   return vloxei64_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3671,7 +3672,7 @@ vfloat32m4_t testuxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const f
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
-vfloat64m1_t testuxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) {
+vfloat64m1_t test_vloxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) {
   return vloxei8_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3681,7 +3682,7 @@ vfloat64m1_t testuxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const d
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
-vfloat64m2_t testuxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) {
+vfloat64m2_t test_vloxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) {
   return vloxei8_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3691,7 +3692,7 @@ vfloat64m2_t testuxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const d
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
-vfloat64m4_t testuxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) {
+vfloat64m4_t test_vloxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) {
   return vloxei8_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3701,7 +3702,7 @@ vfloat64m4_t testuxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const d
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
-vfloat64m8_t testuxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) {
+vfloat64m8_t test_vloxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) {
   return vloxei8_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3711,7 +3712,7 @@ vfloat64m8_t testuxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const do
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
-vfloat64m1_t testuxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) {
+vfloat64m1_t test_vloxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) {
   return vloxei16_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3721,7 +3722,7 @@ vfloat64m1_t testuxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
-vfloat64m2_t testuxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) {
+vfloat64m2_t test_vloxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) {
   return vloxei16_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3731,7 +3732,7 @@ vfloat64m2_t testuxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
-vfloat64m4_t testuxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) {
+vfloat64m4_t test_vloxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) {
   return vloxei16_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3741,7 +3742,7 @@ vfloat64m4_t testuxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
-vfloat64m8_t testuxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) {
+vfloat64m8_t test_vloxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) {
   return vloxei16_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3751,7 +3752,7 @@ vfloat64m8_t testuxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const d
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
-vfloat64m1_t testuxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) {
+vfloat64m1_t test_vloxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) {
   return vloxei32_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3761,7 +3762,7 @@ vfloat64m1_t testuxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
-vfloat64m2_t testuxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) {
+vfloat64m2_t test_vloxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) {
   return vloxei32_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3771,7 +3772,7 @@ vfloat64m2_t testuxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
-vfloat64m4_t testuxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) {
+vfloat64m4_t test_vloxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) {
   return vloxei32_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3781,7 +3782,7 @@ vfloat64m4_t testuxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
-vfloat64m8_t testuxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) {
+vfloat64m8_t test_vloxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) {
   return vloxei32_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3791,7 +3792,7 @@ vfloat64m8_t testuxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const d
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
-vfloat64m1_t testuxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
+vfloat64m1_t test_vloxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
   return vloxei64_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3801,7 +3802,7 @@ vfloat64m1_t testuxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
-vfloat64m2_t testuxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
+vfloat64m2_t test_vloxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
   return vloxei64_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3811,7 +3812,7 @@ vfloat64m2_t testuxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
-vfloat64m4_t testuxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
+vfloat64m4_t test_vloxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
   return vloxei64_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3821,6 +3822,426 @@ vfloat64m4_t testuxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
-vfloat64m8_t testuxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
+vfloat64m8_t test_vloxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
   return vloxei64_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i8.i64(<vscale x 1 x half>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vloxei8_v_f16mf4 (const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxei8_v_f16mf4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i8.i64(<vscale x 2 x half>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vloxei8_v_f16mf2 (const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxei8_v_f16mf2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i8.i64(<vscale x 4 x half>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vloxei8_v_f16m1 (const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxei8_v_f16m1 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i8.i64(<vscale x 8 x half>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vloxei8_v_f16m2 (const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+  return vloxei8_v_f16m2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i8.i64(<vscale x 16 x half>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
+//
+vfloat16m4_t test_vloxei8_v_f16m4 (const _Float16 *base, vuint8m2_t bindex, size_t vl) {
+  return vloxei8_v_f16m4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i8.i64(<vscale x 32 x half>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
+//
+vfloat16m8_t test_vloxei8_v_f16m8 (const _Float16 *base, vuint8m4_t bindex, size_t vl) {
+  return vloxei8_v_f16m8 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i16.i64(<vscale x 1 x half>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vloxei16_v_f16mf4 (const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxei16_v_f16mf4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i16.i64(<vscale x 2 x half>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vloxei16_v_f16mf2 (const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxei16_v_f16mf2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i16.i64(<vscale x 4 x half>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vloxei16_v_f16m1 (const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxei16_v_f16m1 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i16.i64(<vscale x 8 x half>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vloxei16_v_f16m2 (const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vloxei16_v_f16m2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i16.i64(<vscale x 16 x half>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
+//
+vfloat16m4_t test_vloxei16_v_f16m4 (const _Float16 *base, vuint16m4_t bindex, size_t vl) {
+  return vloxei16_v_f16m4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i16.i64(<vscale x 32 x half>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
+//
+vfloat16m8_t test_vloxei16_v_f16m8 (const _Float16 *base, vuint16m8_t bindex, size_t vl) {
+  return vloxei16_v_f16m8 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i32.i64(<vscale x 1 x half>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vloxei32_v_f16mf4 (const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxei32_v_f16mf4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i32.i64(<vscale x 2 x half>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vloxei32_v_f16mf2 (const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxei32_v_f16mf2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32.i64(<vscale x 4 x half>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vloxei32_v_f16m1 (const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vloxei32_v_f16m1 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i32.i64(<vscale x 8 x half>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vloxei32_v_f16m2 (const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vloxei32_v_f16m2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i32.i64(<vscale x 16 x half>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
+//
+vfloat16m4_t test_vloxei32_v_f16m4 (const _Float16 *base, vuint32m8_t bindex, size_t vl) {
+  return vloxei32_v_f16m4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i64.i64(<vscale x 1 x half>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vloxei64_v_f16mf4 (const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vloxei64_v_f16mf4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i64.i64(<vscale x 2 x half>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vloxei64_v_f16mf2 (const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxei64_v_f16mf2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i64.i64(<vscale x 4 x half>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vloxei64_v_f16m1 (const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxei64_v_f16m1 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i64.i64(<vscale x 8 x half>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vloxei64_v_f16m2 (const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vloxei64_v_f16m2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vloxei8_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxei8_v_f16mf4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vloxei8_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxei8_v_f16mf2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vloxei8_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxei8_v_f16m1_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vloxei8_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+  return vloxei8_v_f16m2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
+//
+vfloat16m4_t test_vloxei8_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
+  return vloxei8_v_f16m4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
+//
+vfloat16m8_t test_vloxei8_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) {
+  return vloxei8_v_f16m8_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vloxei16_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxei16_v_f16mf4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vloxei16_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxei16_v_f16mf2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vloxei16_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxei16_v_f16m1_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vloxei16_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vloxei16_v_f16m2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
+//
+vfloat16m4_t test_vloxei16_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
+  return vloxei16_v_f16m4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
+//
+vfloat16m8_t test_vloxei16_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) {
+  return vloxei16_v_f16m8_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vloxei32_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxei32_v_f16mf4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vloxei32_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxei32_v_f16mf2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vloxei32_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vloxei32_v_f16m1_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vloxei32_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vloxei32_v_f16m2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
+//
+vfloat16m4_t test_vloxei32_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
+  return vloxei32_v_f16m4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vloxei64_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vloxei64_v_f16mf4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vloxei64_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxei64_v_f16mf2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vloxei64_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxei64_v_f16m1_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vloxei64_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vloxei64_v_f16m2_m (mask, maskedoff, base, bindex, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg.c
index 3f76b449149e..1f615116e123 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg.c
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
-// RUN:   -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -target-feature +experimental-v \
 // RUN:   -target-feature +experimental-zvlsseg -disable-O0-optnone -emit-llvm %s \
 // RUN:   -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
@@ -28783,3 +28783,3632 @@ void test_vloxseg2ei64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t
   return vloxseg2ei64_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg2ei8_v_f16mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg3.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg3ei8_v_f16mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg4.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg4ei8_v_f16mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg5.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg5ei8_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg6.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg6ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg7.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg7ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg8.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg8ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg2.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg2ei8_v_f16mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg3.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg3ei8_v_f16mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg4.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg4ei8_v_f16mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg5.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg5ei8_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg6.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg6ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg7.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg7ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg8.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg8ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg2.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg2ei8_v_f16m1(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg3.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg3ei8_v_f16m1(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg4.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg4ei8_v_f16m1(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg5.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg5ei8_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg6.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg6ei8_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg7.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg7ei8_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg8.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg8ei8_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg2.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+  return vloxseg2ei8_v_f16m2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg3.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+  return vloxseg3ei8_v_f16m2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg4.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+  return vloxseg4ei8_v_f16m2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vloxseg2.nxv16f16.nxv16i8.i64(half* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
+  return vloxseg2ei8_v_f16m4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg2ei16_v_f16mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg3.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg3ei16_v_f16mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg4.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg4ei16_v_f16mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg5.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg5ei16_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg6.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg6ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg7.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg7ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg8.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg8ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg2.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg2ei16_v_f16mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg3.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg3ei16_v_f16mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg4.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg4ei16_v_f16mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg5.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg5ei16_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg6.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg6ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg7.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg7ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg8.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg8ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg2.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg2ei16_v_f16m1(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg3.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg3ei16_v_f16m1(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg4.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg4ei16_v_f16m1(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg5.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg5ei16_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg6.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg6ei16_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg7.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg7ei16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg8.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg8ei16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg2.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vloxseg2ei16_v_f16m2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg3.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vloxseg3ei16_v_f16m2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg4.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vloxseg4ei16_v_f16m2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vloxseg2.nxv16f16.nxv16i16.i64(half* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei16_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
+  return vloxseg2ei16_v_f16m4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg2ei32_v_f16mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg3.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg3ei32_v_f16mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg4.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg4ei32_v_f16mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg5.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg5ei32_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg6.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg6ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg7.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg7ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg8.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg8ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg2.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg2ei32_v_f16mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg3.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg3ei32_v_f16mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg4.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg4ei32_v_f16mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg5.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg5ei32_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg6.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg6ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg7.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg7ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg8.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg8ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg2.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg2ei32_v_f16m1(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg3.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg3ei32_v_f16m1(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg4.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg4ei32_v_f16m1(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg5.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg5ei32_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg6.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg6ei32_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg7.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg7ei32_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg8.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg8ei32_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg2.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vloxseg2ei32_v_f16m2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg3.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vloxseg3ei32_v_f16m2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg4.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vloxseg4ei32_v_f16m2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64(half* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
+  return vloxseg2ei32_v_f16m4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg2ei64_v_f16mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg3.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg3ei64_v_f16mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg4.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg4ei64_v_f16mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg5.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg5ei64_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg6.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg6ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg7.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg7ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg8.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg8ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg2.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg2ei64_v_f16mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg3.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg3ei64_v_f16mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg4.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg4ei64_v_f16mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg5.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg5ei64_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg6.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg6ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg7.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg7ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg8.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg8ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg2.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg2ei64_v_f16m1(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg3.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg3ei64_v_f16m1(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg4.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg4ei64_v_f16m1(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg5.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg5ei64_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg6.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg6ei64_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg7.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg7ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg8.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg8ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg2.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vloxseg2ei64_v_f16m2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg3.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vloxseg3ei64_v_f16m2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg4.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vloxseg4ei64_v_f16m2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg2ei8_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg3ei8_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg4ei8_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg5ei8_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg6ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg7ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], <vscale x 1 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg8ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg2ei8_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg3ei8_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg4ei8_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg5ei8_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg6ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg7ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], <vscale x 2 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg8ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg2ei8_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg3ei8_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg4ei8_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg5ei8_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg6ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg7ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], <vscale x 4 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg8ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+  return vloxseg2ei8_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei8_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+  return vloxseg3ei8_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei8_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+  return vloxseg4ei8_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
+  return vloxseg2ei8_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg2ei16_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg3ei16_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg4ei16_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg5ei16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg6ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg7ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], <vscale x 1 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg8ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg2ei16_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg3ei16_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg4ei16_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg5ei16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg6ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg7ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], <vscale x 2 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg8ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg2ei16_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg3ei16_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg4ei16_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg5ei16_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg6ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg7ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], <vscale x 4 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg8ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vloxseg2ei16_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vloxseg3ei16_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vloxseg4ei16_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16.i64(<vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei16_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
+  return vloxseg2ei16_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg2ei32_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg3ei32_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg4ei32_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg5ei32_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg6ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg7ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], <vscale x 1 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg8ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg2ei32_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg3ei32_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg4ei32_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg5ei32_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg6ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg7ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], <vscale x 2 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg8ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg2ei32_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg3ei32_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg4ei32_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg5ei32_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg6ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg7ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], <vscale x 4 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg8ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vloxseg2ei32_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei32_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vloxseg3ei32_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei32_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vloxseg4ei32_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32.i64(<vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
+  return vloxseg2ei32_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg2ei64_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg3ei64_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg4ei64_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg5ei64_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg6ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg7ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], <vscale x 1 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg8ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg2ei64_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg3ei64_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg4ei64_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg5ei64_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg6ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg7ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], <vscale x 2 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg8ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg2ei64_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg3ei64_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg4ei64_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg5ei64_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg6ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg7ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], <vscale x 4 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg8ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vloxseg2ei64_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei64_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vloxseg3ei64_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei64_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vloxseg4ei64_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c
index d31df14d6b0c..58c6570071c5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -1219,3 +1220,123 @@ vfloat64m8_t test_vlse64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                    size_t vl) {
   return vlse64_v_f64m8_m(mask, maskedoff, base, bstride, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vlse.nxv1f16.i64(<vscale x 1 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vlse16_v_f16mf4 (const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16mf4 (base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vlse.nxv2f16.i64(<vscale x 2 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vlse16_v_f16mf2 (const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16mf2 (base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vlse.nxv4f16.i64(<vscale x 4 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vlse16_v_f16m1 (const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16m1 (base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vlse.nxv8f16.i64(<vscale x 8 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vlse16_v_f16m2 (const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16m2 (base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vlse.nxv16f16.i64(<vscale x 16 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
+//
+vfloat16m4_t test_vlse16_v_f16m4 (const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16m4 (base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vlse.nxv32f16.i64(<vscale x 32 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
+//
+vfloat16m8_t test_vlse16_v_f16m8 (const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16m8 (base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vlse.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vlse16_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16mf4_m (mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vlse.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vlse16_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16mf2_m (mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vlse.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vlse16_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16m1_m (mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vlse.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vlse16_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16m2_m (mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vlse.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
+//
+vfloat16m4_t test_vlse16_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16m4_m (mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vlse.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
+//
+vfloat16m8_t test_vlse16_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16m8_m (mask, maskedoff, base, bstride, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c
index 13a65342565d..50e1141779f7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c
@@ -12960,3 +12960,1631 @@ void test_vlseg4e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
 void test_vlseg2e64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl) {
   return vlseg2e64_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
 }
+
+// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg2.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg2.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, size_t vl) {
+  return vlseg2e16_v_f16mf4(v0, v1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg3.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg3.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, size_t vl) {
+  return vlseg3e16_v_f16mf4(v0, v1, v2, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg4.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg4.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, size_t vl) {
+  return vlseg4e16_v_f16mf4(v0, v1, v2, v3, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg5e16_v_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg5.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg5.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, size_t vl) {
+  return vlseg5e16_v_f16mf4(v0, v1, v2, v3, v4, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg6e16_v_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg6.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg6.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, size_t vl) {
+  return vlseg6e16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg7e16_v_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg7.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg7.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, size_t vl) {
+  return vlseg7e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg8e16_v_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg8.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg8.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, size_t vl) {
+  return vlseg8e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg2.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg2.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, size_t vl) {
+  return vlseg2e16_v_f16mf2(v0, v1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg3.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg3.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, size_t vl) {
+  return vlseg3e16_v_f16mf2(v0, v1, v2, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg4.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg4.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, size_t vl) {
+  return vlseg4e16_v_f16mf2(v0, v1, v2, v3, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg5e16_v_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg5.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg5.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, size_t vl) {
+  return vlseg5e16_v_f16mf2(v0, v1, v2, v3, v4, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg6e16_v_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg6.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg6.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, size_t vl) {
+  return vlseg6e16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg7e16_v_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg7.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg7.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, size_t vl) {
+  return vlseg7e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg8e16_v_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg8.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg8.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, size_t vl) {
+  return vlseg8e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg2.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg2.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, size_t vl) {
+  return vlseg2e16_v_f16m1(v0, v1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg3.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg3.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, size_t vl) {
+  return vlseg3e16_v_f16m1(v0, v1, v2, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg4.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg4.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, size_t vl) {
+  return vlseg4e16_v_f16m1(v0, v1, v2, v3, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg5e16_v_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg5.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg5.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, size_t vl) {
+  return vlseg5e16_v_f16m1(v0, v1, v2, v3, v4, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg6e16_v_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg6.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg6.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, size_t vl) {
+  return vlseg6e16_v_f16m1(v0, v1, v2, v3, v4, v5, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg7e16_v_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg7.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg7.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, size_t vl) {
+  return vlseg7e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg8e16_v_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg8.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg8.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, size_t vl) {
+  return vlseg8e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlseg2.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlseg2.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, size_t vl) {
+  return vlseg2e16_v_f16m2(v0, v1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlseg3.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlseg3.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, size_t vl) {
+  return vlseg3e16_v_f16m2(v0, v1, v2, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlseg4.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlseg4.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, size_t vl) {
+  return vlseg4e16_v_f16m2(v0, v1, v2, v3, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vlseg2.nxv16f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vlseg2.nxv16f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, size_t vl) {
+  return vlseg2e16_v_f16m4(v0, v1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg2.mask.nxv1f16.i32(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg2.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t vl) {
+  return vlseg2e16_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg3.mask.nxv1f16.i32(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg3.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t vl) {
+  return vlseg3e16_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg4.mask.nxv1f16.i32(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg4.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t vl) {
+  return vlseg4e16_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg5e16_v_f16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg5.mask.nxv1f16.i32(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg5.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t vl) {
+  return vlseg5e16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg6e16_v_f16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg6.mask.nxv1f16.i32(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg6.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t vl) {
+  return vlseg6e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg7e16_v_f16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg7.mask.nxv1f16.i32(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg7.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t vl) {
+  return vlseg7e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg8e16_v_f16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg8.mask.nxv1f16.i32(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], <vscale x 1 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg8.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], <vscale x 1 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t vl) {
+  return vlseg8e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg2.mask.nxv2f16.i32(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg2.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t vl) {
+  return vlseg2e16_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg3.mask.nxv2f16.i32(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg3.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t vl) {
+  return vlseg3e16_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg4.mask.nxv2f16.i32(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg4.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t vl) {
+  return vlseg4e16_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg5e16_v_f16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg5.mask.nxv2f16.i32(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg5.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t vl) {
+  return vlseg5e16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg6e16_v_f16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg6.mask.nxv2f16.i32(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg6.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t vl) {
+  return vlseg6e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg7e16_v_f16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg7.mask.nxv2f16.i32(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg7.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t vl) {
+  return vlseg7e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg8e16_v_f16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg8.mask.nxv2f16.i32(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], <vscale x 2 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlseg8.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], <vscale x 2 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t vl) {
+  return vlseg8e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg2.mask.nxv4f16.i32(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg2.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t vl) {
+  return vlseg2e16_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg3.mask.nxv4f16.i32(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg3.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t vl) {
+  return vlseg3e16_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg4.mask.nxv4f16.i32(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg4.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t vl) {
+  return vlseg4e16_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg5e16_v_f16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg5.mask.nxv4f16.i32(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg5.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t vl) {
+  return vlseg5e16_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg6e16_v_f16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg6.mask.nxv4f16.i32(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg6.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t vl) {
+  return vlseg6e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg7e16_v_f16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg7.mask.nxv4f16.i32(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg7.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t vl) {
+  return vlseg7e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg8e16_v_f16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg8.mask.nxv4f16.i32(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], <vscale x 4 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg8.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], <vscale x 4 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t vl) {
+  return vlseg8e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlseg2.mask.nxv8f16.i32(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlseg2.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t vl) {
+  return vlseg2e16_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlseg3.mask.nxv8f16.i32(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlseg3.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t vl) {
+  return vlseg3e16_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlseg4.mask.nxv8f16.i32(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlseg4.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t vl) {
+  return vlseg4e16_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vlseg2.mask.nxv16f16.i32(<vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vlseg2.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t vl) {
+  return vlseg2e16_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c
index 59af0a0e94f7..c279ea825a9e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c
@@ -14568,3 +14568,1831 @@ void test_vlseg4e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
 void test_vlseg2e64ff_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t *new_vl, size_t vl) {
   return vlseg2e64ff_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
 }
+
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, i32 } @llvm.riscv.vlseg2ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, i64 } @llvm.riscv.vlseg2ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_f16mf4(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } @llvm.riscv.vlseg3ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } @llvm.riscv.vlseg3ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg3e16ff_v_f16mf4(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } @llvm.riscv.vlseg4ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } @llvm.riscv.vlseg4ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg4e16ff_v_f16mf4(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } @llvm.riscv.vlseg5ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } @llvm.riscv.vlseg5ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg5e16ff_v_f16mf4(v0, v1, v2, v3, v4, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } @llvm.riscv.vlseg6ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } @llvm.riscv.vlseg6ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg6e16ff_v_f16mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } @llvm.riscv.vlseg7ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 7
+// CHECK-RV32-NEXT:    store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } @llvm.riscv.vlseg7ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg7e16ff_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } @llvm.riscv.vlseg8ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 7
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 8
+// CHECK-RV32-NEXT:    store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } @llvm.riscv.vlseg8ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg8e16ff_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, i32 } @llvm.riscv.vlseg2ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, i64 } @llvm.riscv.vlseg2ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_f16mf2(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } @llvm.riscv.vlseg3ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } @llvm.riscv.vlseg3ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg3e16ff_v_f16mf2(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } @llvm.riscv.vlseg4ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } @llvm.riscv.vlseg4ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg4e16ff_v_f16mf2(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } @llvm.riscv.vlseg5ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } @llvm.riscv.vlseg5ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg5e16ff_v_f16mf2(v0, v1, v2, v3, v4, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } @llvm.riscv.vlseg6ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } @llvm.riscv.vlseg6ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg6e16ff_v_f16mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } @llvm.riscv.vlseg7ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 7
+// CHECK-RV32-NEXT:    store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } @llvm.riscv.vlseg7ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg7e16ff_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } @llvm.riscv.vlseg8ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 7
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 8
+// CHECK-RV32-NEXT:    store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } @llvm.riscv.vlseg8ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg8e16ff_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, i32 } @llvm.riscv.vlseg2ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, i64 } @llvm.riscv.vlseg2ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_f16m1(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } @llvm.riscv.vlseg3ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } @llvm.riscv.vlseg3ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg3e16ff_v_f16m1(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } @llvm.riscv.vlseg4ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } @llvm.riscv.vlseg4ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg4e16ff_v_f16m1(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } @llvm.riscv.vlseg5ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } @llvm.riscv.vlseg5ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg5e16ff_v_f16m1(v0, v1, v2, v3, v4, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } @llvm.riscv.vlseg6ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } @llvm.riscv.vlseg6ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg6e16ff_v_f16m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } @llvm.riscv.vlseg7ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 7
+// CHECK-RV32-NEXT:    store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } @llvm.riscv.vlseg7ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg7e16ff_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } @llvm.riscv.vlseg8ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 7
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 8
+// CHECK-RV32-NEXT:    store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } @llvm.riscv.vlseg8ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg8e16ff_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, i32 } @llvm.riscv.vlseg2ff.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, i64 } @llvm.riscv.vlseg2ff.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_f16m2(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } @llvm.riscv.vlseg3ff.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } @llvm.riscv.vlseg3ff.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16ff_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg3e16ff_v_f16m2(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } @llvm.riscv.vlseg4ff.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } @llvm.riscv.vlseg4ff.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16ff_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg4e16ff_v_f16m2(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half>, i32 } @llvm.riscv.vlseg2ff.nxv16f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half>, i64 } @llvm.riscv.vlseg2ff.nxv16f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_f16m4(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1f16.i32(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } @llvm.riscv.vlseg3ff.mask.nxv1f16.i32(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } @llvm.riscv.vlseg3ff.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg3e16ff_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } @llvm.riscv.vlseg4ff.mask.nxv1f16.i32(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } @llvm.riscv.vlseg4ff.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg4e16ff_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_f16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } @llvm.riscv.vlseg5ff.mask.nxv1f16.i32(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } @llvm.riscv.vlseg5ff.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg5e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_f16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } @llvm.riscv.vlseg6ff.mask.nxv1f16.i32(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } @llvm.riscv.vlseg6ff.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg6e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_f16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } @llvm.riscv.vlseg7ff.mask.nxv1f16.i32(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 7
+// CHECK-RV32-NEXT:    store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } @llvm.riscv.vlseg7ff.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg7e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_f16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } @llvm.riscv.vlseg8ff.mask.nxv1f16.i32(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], <vscale x 1 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 7
+// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i32 } [[TMP0]], 8
+// CHECK-RV32-NEXT:    store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } @llvm.riscv.vlseg8ff.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], <vscale x 1 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg8e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, i32 } @llvm.riscv.vlseg2ff.mask.nxv2f16.i32(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, i64 } @llvm.riscv.vlseg2ff.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } @llvm.riscv.vlseg3ff.mask.nxv2f16.i32(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } @llvm.riscv.vlseg3ff.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg3e16ff_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } @llvm.riscv.vlseg4ff.mask.nxv2f16.i32(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } @llvm.riscv.vlseg4ff.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg4e16ff_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_f16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } @llvm.riscv.vlseg5ff.mask.nxv2f16.i32(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } @llvm.riscv.vlseg5ff.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg5e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_f16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } @llvm.riscv.vlseg6ff.mask.nxv2f16.i32(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } @llvm.riscv.vlseg6ff.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg6e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_f16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } @llvm.riscv.vlseg7ff.mask.nxv2f16.i32(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 7
+// CHECK-RV32-NEXT:    store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } @llvm.riscv.vlseg7ff.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg7e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_f16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } @llvm.riscv.vlseg8ff.mask.nxv2f16.i32(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], <vscale x 2 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 7
+// CHECK-RV32-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i32 } [[TMP0]], 8
+// CHECK-RV32-NEXT:    store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } @llvm.riscv.vlseg8ff.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], <vscale x 2 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg8e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, i32 } @llvm.riscv.vlseg2ff.mask.nxv4f16.i32(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } @llvm.riscv.vlseg3ff.mask.nxv4f16.i32(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } @llvm.riscv.vlseg3ff.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg3e16ff_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } @llvm.riscv.vlseg4ff.mask.nxv4f16.i32(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } @llvm.riscv.vlseg4ff.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg4e16ff_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_f16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } @llvm.riscv.vlseg5ff.mask.nxv4f16.i32(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } @llvm.riscv.vlseg5ff.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg5e16ff_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_f16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } @llvm.riscv.vlseg6ff.mask.nxv4f16.i32(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } @llvm.riscv.vlseg6ff.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg6e16ff_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_f16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } @llvm.riscv.vlseg7ff.mask.nxv4f16.i32(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 7
+// CHECK-RV32-NEXT:    store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } @llvm.riscv.vlseg7ff.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg7e16ff_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_f16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } @llvm.riscv.vlseg8ff.mask.nxv4f16.i32(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], <vscale x 4 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 5
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 6
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 7
+// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i32 } [[TMP0]], 8
+// CHECK-RV32-NEXT:    store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } @llvm.riscv.vlseg8ff.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], <vscale x 4 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg8e16ff_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, i32 } @llvm.riscv.vlseg2ff.mask.nxv8f16.i32(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, i64 } @llvm.riscv.vlseg2ff.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } @llvm.riscv.vlseg3ff.mask.nxv8f16.i32(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } @llvm.riscv.vlseg3ff.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16ff_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg3e16ff_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } @llvm.riscv.vlseg4ff.mask.nxv8f16.i32(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 3
+// CHECK-RV32-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 } [[TMP0]], 4
+// CHECK-RV32-NEXT:    store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } @llvm.riscv.vlseg4ff.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16ff_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg4e16ff_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half>, i32 } @llvm.riscv.vlseg2ff.mask.nxv16f16.i32(<vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half>, i64 } @llvm.riscv.vlseg2ff.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsseg.c
index 862a33c9640f..dd2bdb9d9b76 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsseg.c
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
-// RUN:   -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -target-feature +experimental-v \
 // RUN:   -target-feature +experimental-zvlsseg -disable-O0-optnone -emit-llvm %s \
 // RUN:   -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
@@ -7285,3 +7285,916 @@ void test_vlsseg2e64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t ma
   return vlsseg2e64_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg2.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e16_v_f16mf4(v0, v1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg3.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg3e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg3e16_v_f16mf4(v0, v1, v2, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg4.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg4e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg4e16_v_f16mf4(v0, v1, v2, v3, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg5.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg5e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg5e16_v_f16mf4(v0, v1, v2, v3, v4, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg6.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg6e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg6e16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg7.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg7e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg7e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg8.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg8e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg8e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg2.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e16_v_f16mf2(v0, v1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg3.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg3e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg3e16_v_f16mf2(v0, v1, v2, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg4.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg4e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg4e16_v_f16mf2(v0, v1, v2, v3, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg5.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg5e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg5e16_v_f16mf2(v0, v1, v2, v3, v4, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg6.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg6e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg6e16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg7.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg7e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg7e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg8.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg8e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg8e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg2.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e16_v_f16m1(v0, v1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg3.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg3e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg3e16_v_f16m1(v0, v1, v2, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg4.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg4e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg4e16_v_f16m1(v0, v1, v2, v3, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg5.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg5e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg5e16_v_f16m1(v0, v1, v2, v3, v4, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg6.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg6e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg6e16_v_f16m1(v0, v1, v2, v3, v4, v5, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg7.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg7e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg7e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg8.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg8e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg8e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg2.nxv8f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e16_v_f16m2(v0, v1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg3.nxv8f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg3e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg3e16_v_f16m2(v0, v1, v2, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg4.nxv8f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg4e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg4e16_v_f16m2(v0, v1, v2, v3, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vlsseg2.nxv16f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e16_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e16_v_f16m4(v0, v1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg2.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e16_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg3.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg3e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg3e16_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg4.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg4e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg4e16_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg5.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg5e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg5e16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg6.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg6e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg6e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg7.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg7e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg7e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg8.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], <vscale x 1 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg8e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg8e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg2.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e16_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg3.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg3e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg3e16_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg4.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg4e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg4e16_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg5.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg5e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg5e16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg6.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg6e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg6e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg7.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg7e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg7e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg8.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], <vscale x 2 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg8e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg8e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg2.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e16_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg3.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg3e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg3e16_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg4.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg4e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg4e16_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg5.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg5e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg5e16_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg6.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg6e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg6e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg7.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg7e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg7e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg8.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], <vscale x 4 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg8e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg8e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg2.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e16_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg3.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg3e16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg3e16_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg4.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg4e16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg4e16_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vlsseg2.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e16_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e16_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c
index 8c71e0ec82db..ece36400659e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -11,7 +12,7 @@
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vint8mf8_t testuxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+vint8mf8_t test_vluxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_i8mf8(base, bindex, vl);
 }
 
@@ -21,7 +22,7 @@ vint8mf8_t testuxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vint8mf4_t testuxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
+vint8mf4_t test_vluxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_i8mf4(base, bindex, vl);
 }
 
@@ -31,7 +32,7 @@ vint8mf4_t testuxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vint8mf2_t testuxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
+vint8mf2_t test_vluxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_i8mf2(base, bindex, vl);
 }
 
@@ -41,7 +42,7 @@ vint8mf2_t testuxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vint8m1_t testuxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) {
+vint8m1_t test_vluxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_i8m1(base, bindex, vl);
 }
 
@@ -51,7 +52,7 @@ vint8m1_t testuxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vint8m2_t testuxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
+vint8m2_t test_vluxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_i8m2(base, bindex, vl);
 }
 
@@ -61,7 +62,7 @@ vint8m2_t testuxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
-vint8m4_t testuxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) {
+vint8m4_t test_vluxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) {
   return vluxei8_v_i8m4(base, bindex, vl);
 }
 
@@ -71,7 +72,7 @@ vint8m4_t testuxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
-vint8m8_t testuxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) {
+vint8m8_t test_vluxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) {
   return vluxei8_v_i8m8(base, bindex, vl);
 }
 
@@ -81,7 +82,7 @@ vint8m8_t testuxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vint8mf8_t testuxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+vint8mf8_t test_vluxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_i8mf8(base, bindex, vl);
 }
 
@@ -91,7 +92,7 @@ vint8mf8_t testuxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vint8mf4_t testuxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) {
+vint8mf4_t test_vluxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_i8mf4(base, bindex, vl);
 }
 
@@ -101,7 +102,7 @@ vint8mf4_t testuxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vint8mf2_t testuxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) {
+vint8mf2_t test_vluxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_i8mf2(base, bindex, vl);
 }
 
@@ -111,7 +112,7 @@ vint8mf2_t testuxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vint8m1_t testuxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) {
+vint8m1_t test_vluxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_i8m1(base, bindex, vl);
 }
 
@@ -121,7 +122,7 @@ vint8m1_t testuxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vint8m2_t testuxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) {
+vint8m2_t test_vluxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) {
   return vluxei16_v_i8m2(base, bindex, vl);
 }
 
@@ -131,7 +132,7 @@ vint8m2_t testuxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
-vint8m4_t testuxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) {
+vint8m4_t test_vluxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) {
   return vluxei16_v_i8m4(base, bindex, vl);
 }
 
@@ -141,7 +142,7 @@ vint8m4_t testuxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vint8mf8_t testuxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+vint8mf8_t test_vluxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_i8mf8(base, bindex, vl);
 }
 
@@ -151,7 +152,7 @@ vint8mf8_t testuxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vint8mf4_t testuxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) {
+vint8mf4_t test_vluxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_i8mf4(base, bindex, vl);
 }
 
@@ -161,7 +162,7 @@ vint8mf4_t testuxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vint8mf2_t testuxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) {
+vint8mf2_t test_vluxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_i8mf2(base, bindex, vl);
 }
 
@@ -171,7 +172,7 @@ vint8mf2_t testuxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vint8m1_t testuxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) {
+vint8m1_t test_vluxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_i8m1(base, bindex, vl);
 }
 
@@ -181,7 +182,7 @@ vint8m1_t testuxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vint8m2_t testuxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) {
+vint8m2_t test_vluxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) {
   return vluxei32_v_i8m2(base, bindex, vl);
 }
 
@@ -191,7 +192,7 @@ vint8m2_t testuxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vint8mf8_t testuxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) {
+vint8mf8_t test_vluxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_i8mf8(base, bindex, vl);
 }
 
@@ -201,7 +202,7 @@ vint8mf8_t testuxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vint8mf4_t testuxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) {
+vint8mf4_t test_vluxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_i8mf4(base, bindex, vl);
 }
 
@@ -211,7 +212,7 @@ vint8mf4_t testuxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vint8mf2_t testuxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) {
+vint8mf2_t test_vluxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_i8mf2(base, bindex, vl);
 }
 
@@ -221,7 +222,7 @@ vint8mf2_t testuxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vint8m1_t testuxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) {
+vint8m1_t test_vluxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_i8m1(base, bindex, vl);
 }
 
@@ -231,7 +232,7 @@ vint8m1_t testuxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vint16mf4_t testuxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+vint16mf4_t test_vluxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_i16mf4(base, bindex, vl);
 }
 
@@ -241,7 +242,7 @@ vint16mf4_t testuxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vint16mf2_t testuxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+vint16mf2_t test_vluxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_i16mf2(base, bindex, vl);
 }
 
@@ -251,7 +252,7 @@ vint16mf2_t testuxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vint16m1_t testuxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) {
+vint16m1_t test_vluxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_i16m1(base, bindex, vl);
 }
 
@@ -261,7 +262,7 @@ vint16m1_t testuxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vint16m2_t testuxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) {
+vint16m2_t test_vluxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_i16m2(base, bindex, vl);
 }
 
@@ -271,7 +272,7 @@ vint16m2_t testuxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vint16m4_t testuxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) {
+vint16m4_t test_vluxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_i16m4(base, bindex, vl);
 }
 
@@ -281,7 +282,7 @@ vint16m4_t testuxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
-vint16m8_t testuxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) {
+vint16m8_t test_vluxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) {
   return vluxei8_v_i16m8(base, bindex, vl);
 }
 
@@ -291,7 +292,7 @@ vint16m8_t testuxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vint16mf4_t testuxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+vint16mf4_t test_vluxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_i16mf4(base, bindex, vl);
 }
 
@@ -301,7 +302,7 @@ vint16mf4_t testuxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vint16mf2_t testuxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+vint16mf2_t test_vluxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_i16mf2(base, bindex, vl);
 }
 
@@ -311,7 +312,7 @@ vint16mf2_t testuxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vint16m1_t testuxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) {
+vint16m1_t test_vluxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_i16m1(base, bindex, vl);
 }
 
@@ -321,7 +322,7 @@ vint16m1_t testuxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vint16m2_t testuxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) {
+vint16m2_t test_vluxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_i16m2(base, bindex, vl);
 }
 
@@ -331,7 +332,7 @@ vint16m2_t testuxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vint16m4_t testuxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) {
+vint16m4_t test_vluxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) {
   return vluxei16_v_i16m4(base, bindex, vl);
 }
 
@@ -341,7 +342,7 @@ vint16m4_t testuxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
-vint16m8_t testuxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) {
+vint16m8_t test_vluxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) {
   return vluxei16_v_i16m8(base, bindex, vl);
 }
 
@@ -351,7 +352,7 @@ vint16m8_t testuxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vint16mf4_t testuxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+vint16mf4_t test_vluxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_i16mf4(base, bindex, vl);
 }
 
@@ -361,7 +362,7 @@ vint16mf4_t testuxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vint16mf2_t testuxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) {
+vint16mf2_t test_vluxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_i16mf2(base, bindex, vl);
 }
 
@@ -371,7 +372,7 @@ vint16mf2_t testuxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vint16m1_t testuxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) {
+vint16m1_t test_vluxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_i16m1(base, bindex, vl);
 }
 
@@ -381,7 +382,7 @@ vint16m1_t testuxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vint16m2_t testuxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) {
+vint16m2_t test_vluxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_i16m2(base, bindex, vl);
 }
 
@@ -391,7 +392,7 @@ vint16m2_t testuxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vint16m4_t testuxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) {
+vint16m4_t test_vluxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) {
   return vluxei32_v_i16m4(base, bindex, vl);
 }
 
@@ -401,7 +402,7 @@ vint16m4_t testuxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vint16mf4_t testuxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) {
+vint16mf4_t test_vluxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_i16mf4(base, bindex, vl);
 }
 
@@ -411,7 +412,7 @@ vint16mf4_t testuxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vint16mf2_t testuxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) {
+vint16mf2_t test_vluxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_i16mf2(base, bindex, vl);
 }
 
@@ -421,7 +422,7 @@ vint16mf2_t testuxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vint16m1_t testuxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) {
+vint16m1_t test_vluxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_i16m1(base, bindex, vl);
 }
 
@@ -431,7 +432,7 @@ vint16m1_t testuxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vint16m2_t testuxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) {
+vint16m2_t test_vluxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_i16m2(base, bindex, vl);
 }
 
@@ -441,7 +442,7 @@ vint16m2_t testuxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vint32mf2_t testuxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+vint32mf2_t test_vluxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_i32mf2(base, bindex, vl);
 }
 
@@ -451,7 +452,7 @@ vint32mf2_t testuxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vint32m1_t testuxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+vint32m1_t test_vluxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_i32m1(base, bindex, vl);
 }
 
@@ -461,7 +462,7 @@ vint32m1_t testuxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vint32m2_t testuxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) {
+vint32m2_t test_vluxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_i32m2(base, bindex, vl);
 }
 
@@ -471,7 +472,7 @@ vint32m2_t testuxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vint32m4_t testuxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) {
+vint32m4_t test_vluxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_i32m4(base, bindex, vl);
 }
 
@@ -481,7 +482,7 @@ vint32m4_t testuxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vint32m8_t testuxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) {
+vint32m8_t test_vluxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_i32m8(base, bindex, vl);
 }
 
@@ -491,7 +492,7 @@ vint32m8_t testuxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vint32mf2_t testuxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+vint32mf2_t test_vluxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_i32mf2(base, bindex, vl);
 }
 
@@ -501,7 +502,7 @@ vint32mf2_t testuxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vint32m1_t testuxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
+vint32m1_t test_vluxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_i32m1(base, bindex, vl);
 }
 
@@ -511,7 +512,7 @@ vint32m1_t testuxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vint32m2_t testuxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) {
+vint32m2_t test_vluxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_i32m2(base, bindex, vl);
 }
 
@@ -521,7 +522,7 @@ vint32m2_t testuxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vint32m4_t testuxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) {
+vint32m4_t test_vluxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_i32m4(base, bindex, vl);
 }
 
@@ -531,7 +532,7 @@ vint32m4_t testuxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vint32m8_t testuxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) {
+vint32m8_t test_vluxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) {
   return vluxei16_v_i32m8(base, bindex, vl);
 }
 
@@ -541,7 +542,7 @@ vint32m8_t testuxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vint32mf2_t testuxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+vint32mf2_t test_vluxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_i32mf2(base, bindex, vl);
 }
 
@@ -551,7 +552,7 @@ vint32mf2_t testuxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vint32m1_t testuxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) {
+vint32m1_t test_vluxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_i32m1(base, bindex, vl);
 }
 
@@ -561,7 +562,7 @@ vint32m1_t testuxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vint32m2_t testuxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) {
+vint32m2_t test_vluxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_i32m2(base, bindex, vl);
 }
 
@@ -571,7 +572,7 @@ vint32m2_t testuxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vint32m4_t testuxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) {
+vint32m4_t test_vluxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_i32m4(base, bindex, vl);
 }
 
@@ -581,7 +582,7 @@ vint32m4_t testuxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vint32m8_t testuxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) {
+vint32m8_t test_vluxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) {
   return vluxei32_v_i32m8(base, bindex, vl);
 }
 
@@ -591,7 +592,7 @@ vint32m8_t testuxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vint32mf2_t testuxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) {
+vint32mf2_t test_vluxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_i32mf2(base, bindex, vl);
 }
 
@@ -601,7 +602,7 @@ vint32mf2_t testuxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vint32m1_t testuxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) {
+vint32m1_t test_vluxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_i32m1(base, bindex, vl);
 }
 
@@ -611,7 +612,7 @@ vint32m1_t testuxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vint32m2_t testuxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) {
+vint32m2_t test_vluxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_i32m2(base, bindex, vl);
 }
 
@@ -621,7 +622,7 @@ vint32m2_t testuxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vint32m4_t testuxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) {
+vint32m4_t test_vluxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_i32m4(base, bindex, vl);
 }
 
@@ -631,7 +632,7 @@ vint32m4_t testuxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vint64m1_t testuxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
+vint64m1_t test_vluxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_i64m1(base, bindex, vl);
 }
 
@@ -641,7 +642,7 @@ vint64m1_t testuxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vint64m2_t testuxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) {
+vint64m2_t test_vluxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_i64m2(base, bindex, vl);
 }
 
@@ -651,7 +652,7 @@ vint64m2_t testuxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vint64m4_t testuxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) {
+vint64m4_t test_vluxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_i64m4(base, bindex, vl);
 }
 
@@ -661,7 +662,7 @@ vint64m4_t testuxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vint64m8_t testuxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) {
+vint64m8_t test_vluxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_i64m8(base, bindex, vl);
 }
 
@@ -671,7 +672,7 @@ vint64m8_t testuxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vint64m1_t testuxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
+vint64m1_t test_vluxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_i64m1(base, bindex, vl);
 }
 
@@ -681,7 +682,7 @@ vint64m1_t testuxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vint64m2_t testuxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) {
+vint64m2_t test_vluxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_i64m2(base, bindex, vl);
 }
 
@@ -691,7 +692,7 @@ vint64m2_t testuxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vint64m4_t testuxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) {
+vint64m4_t test_vluxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_i64m4(base, bindex, vl);
 }
 
@@ -701,7 +702,7 @@ vint64m4_t testuxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vint64m8_t testuxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) {
+vint64m8_t test_vluxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_i64m8(base, bindex, vl);
 }
 
@@ -711,7 +712,7 @@ vint64m8_t testuxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vint64m1_t testuxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
+vint64m1_t test_vluxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_i64m1(base, bindex, vl);
 }
 
@@ -721,7 +722,7 @@ vint64m1_t testuxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vint64m2_t testuxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) {
+vint64m2_t test_vluxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_i64m2(base, bindex, vl);
 }
 
@@ -731,7 +732,7 @@ vint64m2_t testuxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vint64m4_t testuxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) {
+vint64m4_t test_vluxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_i64m4(base, bindex, vl);
 }
 
@@ -741,7 +742,7 @@ vint64m4_t testuxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vint64m8_t testuxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) {
+vint64m8_t test_vluxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_i64m8(base, bindex, vl);
 }
 
@@ -751,7 +752,7 @@ vint64m8_t testuxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vint64m1_t testuxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) {
+vint64m1_t test_vluxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_i64m1(base, bindex, vl);
 }
 
@@ -761,7 +762,7 @@ vint64m1_t testuxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vint64m2_t testuxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) {
+vint64m2_t test_vluxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_i64m2(base, bindex, vl);
 }
 
@@ -771,7 +772,7 @@ vint64m2_t testuxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vint64m4_t testuxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) {
+vint64m4_t test_vluxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_i64m4(base, bindex, vl);
 }
 
@@ -781,7 +782,7 @@ vint64m4_t testuxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vint64m8_t testuxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) {
+vint64m8_t test_vluxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_i64m8(base, bindex, vl);
 }
 
@@ -791,7 +792,7 @@ vint64m8_t testuxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vuint8mf8_t testuxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+vuint8mf8_t test_vluxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_u8mf8(base, bindex, vl);
 }
 
@@ -801,7 +802,7 @@ vuint8mf8_t testuxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vuint8mf4_t testuxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+vuint8mf4_t test_vluxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_u8mf4(base, bindex, vl);
 }
 
@@ -811,7 +812,7 @@ vuint8mf4_t testuxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vuint8mf2_t testuxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+vuint8mf2_t test_vluxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_u8mf2(base, bindex, vl);
 }
 
@@ -821,7 +822,7 @@ vuint8mf2_t testuxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vuint8m1_t testuxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
+vuint8m1_t test_vluxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_u8m1(base, bindex, vl);
 }
 
@@ -831,7 +832,7 @@ vuint8m1_t testuxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vuint8m2_t testuxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) {
+vuint8m2_t test_vluxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_u8m2(base, bindex, vl);
 }
 
@@ -841,7 +842,7 @@ vuint8m2_t testuxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
-vuint8m4_t testuxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) {
+vuint8m4_t test_vluxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) {
   return vluxei8_v_u8m4(base, bindex, vl);
 }
 
@@ -851,7 +852,7 @@ vuint8m4_t testuxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
-vuint8m8_t testuxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) {
+vuint8m8_t test_vluxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) {
   return vluxei8_v_u8m8(base, bindex, vl);
 }
 
@@ -861,7 +862,7 @@ vuint8m8_t testuxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vuint8mf8_t testuxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+vuint8mf8_t test_vluxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_u8mf8(base, bindex, vl);
 }
 
@@ -871,7 +872,7 @@ vuint8mf8_t testuxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vuint8mf4_t testuxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+vuint8mf4_t test_vluxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_u8mf4(base, bindex, vl);
 }
 
@@ -881,7 +882,7 @@ vuint8mf4_t testuxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vuint8mf2_t testuxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+vuint8mf2_t test_vluxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_u8mf2(base, bindex, vl);
 }
 
@@ -891,7 +892,7 @@ vuint8mf2_t testuxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vuint8m1_t testuxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) {
+vuint8m1_t test_vluxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_u8m1(base, bindex, vl);
 }
 
@@ -901,7 +902,7 @@ vuint8m1_t testuxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vuint8m2_t testuxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) {
+vuint8m2_t test_vluxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) {
   return vluxei16_v_u8m2(base, bindex, vl);
 }
 
@@ -911,7 +912,7 @@ vuint8m2_t testuxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
-vuint8m4_t testuxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) {
+vuint8m4_t test_vluxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) {
   return vluxei16_v_u8m4(base, bindex, vl);
 }
 
@@ -921,7 +922,7 @@ vuint8m4_t testuxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vuint8mf8_t testuxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+vuint8mf8_t test_vluxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_u8mf8(base, bindex, vl);
 }
 
@@ -931,7 +932,7 @@ vuint8mf8_t testuxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vuint8mf4_t testuxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+vuint8mf4_t test_vluxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_u8mf4(base, bindex, vl);
 }
 
@@ -941,7 +942,7 @@ vuint8mf4_t testuxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vuint8mf2_t testuxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+vuint8mf2_t test_vluxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_u8mf2(base, bindex, vl);
 }
 
@@ -951,7 +952,7 @@ vuint8mf2_t testuxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vuint8m1_t testuxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+vuint8m1_t test_vluxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_u8m1(base, bindex, vl);
 }
 
@@ -961,7 +962,7 @@ vuint8m1_t testuxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vuint8m2_t testuxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+vuint8m2_t test_vluxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) {
   return vluxei32_v_u8m2(base, bindex, vl);
 }
 
@@ -971,7 +972,7 @@ vuint8m2_t testuxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vuint8mf8_t testuxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+vuint8mf8_t test_vluxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_u8mf8(base, bindex, vl);
 }
 
@@ -981,7 +982,7 @@ vuint8mf8_t testuxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vuint8mf4_t testuxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+vuint8mf4_t test_vluxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_u8mf4(base, bindex, vl);
 }
 
@@ -991,7 +992,7 @@ vuint8mf4_t testuxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vuint8mf2_t testuxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+vuint8mf2_t test_vluxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_u8mf2(base, bindex, vl);
 }
 
@@ -1001,7 +1002,7 @@ vuint8mf2_t testuxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vuint8m1_t testuxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) {
+vuint8m1_t test_vluxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_u8m1(base, bindex, vl);
 }
 
@@ -1011,7 +1012,7 @@ vuint8m1_t testuxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vuint16mf4_t testuxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+vuint16mf4_t test_vluxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_u16mf4(base, bindex, vl);
 }
 
@@ -1021,7 +1022,7 @@ vuint16mf4_t testuxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vuint16mf2_t testuxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+vuint16mf2_t test_vluxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_u16mf2(base, bindex, vl);
 }
 
@@ -1031,7 +1032,7 @@ vuint16mf2_t testuxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vuint16m1_t testuxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
+vuint16m1_t test_vluxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_u16m1(base, bindex, vl);
 }
 
@@ -1041,7 +1042,7 @@ vuint16m1_t testuxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vuint16m2_t testuxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) {
+vuint16m2_t test_vluxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_u16m2(base, bindex, vl);
 }
 
@@ -1051,7 +1052,7 @@ vuint16m2_t testuxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vuint16m4_t testuxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) {
+vuint16m4_t test_vluxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_u16m4(base, bindex, vl);
 }
 
@@ -1061,7 +1062,7 @@ vuint16m4_t testuxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
-vuint16m8_t testuxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) {
+vuint16m8_t test_vluxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) {
   return vluxei8_v_u16m8(base, bindex, vl);
 }
 
@@ -1071,7 +1072,7 @@ vuint16m8_t testuxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vuint16mf4_t testuxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+vuint16mf4_t test_vluxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_u16mf4(base, bindex, vl);
 }
 
@@ -1081,7 +1082,7 @@ vuint16mf4_t testuxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vuint16mf2_t testuxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+vuint16mf2_t test_vluxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_u16mf2(base, bindex, vl);
 }
 
@@ -1091,7 +1092,7 @@ vuint16mf2_t testuxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vuint16m1_t testuxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) {
+vuint16m1_t test_vluxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_u16m1(base, bindex, vl);
 }
 
@@ -1101,7 +1102,7 @@ vuint16m1_t testuxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vuint16m2_t testuxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) {
+vuint16m2_t test_vluxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_u16m2(base, bindex, vl);
 }
 
@@ -1111,7 +1112,7 @@ vuint16m2_t testuxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vuint16m4_t testuxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) {
+vuint16m4_t test_vluxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) {
   return vluxei16_v_u16m4(base, bindex, vl);
 }
 
@@ -1121,7 +1122,7 @@ vuint16m4_t testuxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
-vuint16m8_t testuxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) {
+vuint16m8_t test_vluxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) {
   return vluxei16_v_u16m8(base, bindex, vl);
 }
 
@@ -1131,7 +1132,7 @@ vuint16m8_t testuxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vuint16mf4_t testuxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+vuint16mf4_t test_vluxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_u16mf4(base, bindex, vl);
 }
 
@@ -1141,7 +1142,7 @@ vuint16mf4_t testuxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vuint16mf2_t testuxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+vuint16mf2_t test_vluxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_u16mf2(base, bindex, vl);
 }
 
@@ -1151,7 +1152,7 @@ vuint16mf2_t testuxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vuint16m1_t testuxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) {
+vuint16m1_t test_vluxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_u16m1(base, bindex, vl);
 }
 
@@ -1161,7 +1162,7 @@ vuint16m1_t testuxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vuint16m2_t testuxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) {
+vuint16m2_t test_vluxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_u16m2(base, bindex, vl);
 }
 
@@ -1171,7 +1172,7 @@ vuint16m2_t testuxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vuint16m4_t testuxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) {
+vuint16m4_t test_vluxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) {
   return vluxei32_v_u16m4(base, bindex, vl);
 }
 
@@ -1181,7 +1182,7 @@ vuint16m4_t testuxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vuint16mf4_t testuxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+vuint16mf4_t test_vluxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_u16mf4(base, bindex, vl);
 }
 
@@ -1191,7 +1192,7 @@ vuint16mf4_t testuxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vuint16mf2_t testuxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+vuint16mf2_t test_vluxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_u16mf2(base, bindex, vl);
 }
 
@@ -1201,7 +1202,7 @@ vuint16mf2_t testuxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vuint16m1_t testuxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
+vuint16m1_t test_vluxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_u16m1(base, bindex, vl);
 }
 
@@ -1211,7 +1212,7 @@ vuint16m1_t testuxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vuint16m2_t testuxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) {
+vuint16m2_t test_vluxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_u16m2(base, bindex, vl);
 }
 
@@ -1221,7 +1222,7 @@ vuint16m2_t testuxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vuint32mf2_t testuxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+vuint32mf2_t test_vluxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_u32mf2(base, bindex, vl);
 }
 
@@ -1231,7 +1232,7 @@ vuint32mf2_t testuxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vuint32m1_t testuxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+vuint32m1_t test_vluxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_u32m1(base, bindex, vl);
 }
 
@@ -1241,7 +1242,7 @@ vuint32m1_t testuxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vuint32m2_t testuxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
+vuint32m2_t test_vluxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_u32m2(base, bindex, vl);
 }
 
@@ -1251,7 +1252,7 @@ vuint32m2_t testuxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vuint32m4_t testuxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) {
+vuint32m4_t test_vluxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_u32m4(base, bindex, vl);
 }
 
@@ -1261,7 +1262,7 @@ vuint32m4_t testuxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vuint32m8_t testuxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) {
+vuint32m8_t test_vluxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_u32m8(base, bindex, vl);
 }
 
@@ -1271,7 +1272,7 @@ vuint32m8_t testuxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vuint32mf2_t testuxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+vuint32mf2_t test_vluxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_u32mf2(base, bindex, vl);
 }
 
@@ -1281,7 +1282,7 @@ vuint32mf2_t testuxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vuint32m1_t testuxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
+vuint32m1_t test_vluxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_u32m1(base, bindex, vl);
 }
 
@@ -1291,7 +1292,7 @@ vuint32m1_t testuxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vuint32m2_t testuxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) {
+vuint32m2_t test_vluxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_u32m2(base, bindex, vl);
 }
 
@@ -1301,7 +1302,7 @@ vuint32m2_t testuxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vuint32m4_t testuxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) {
+vuint32m4_t test_vluxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_u32m4(base, bindex, vl);
 }
 
@@ -1311,7 +1312,7 @@ vuint32m4_t testuxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vuint32m8_t testuxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) {
+vuint32m8_t test_vluxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) {
   return vluxei16_v_u32m8(base, bindex, vl);
 }
 
@@ -1321,7 +1322,7 @@ vuint32m8_t testuxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vuint32mf2_t testuxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+vuint32mf2_t test_vluxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_u32mf2(base, bindex, vl);
 }
 
@@ -1331,7 +1332,7 @@ vuint32mf2_t testuxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vuint32m1_t testuxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
+vuint32m1_t test_vluxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_u32m1(base, bindex, vl);
 }
 
@@ -1341,7 +1342,7 @@ vuint32m1_t testuxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vuint32m2_t testuxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) {
+vuint32m2_t test_vluxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_u32m2(base, bindex, vl);
 }
 
@@ -1351,7 +1352,7 @@ vuint32m2_t testuxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vuint32m4_t testuxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) {
+vuint32m4_t test_vluxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_u32m4(base, bindex, vl);
 }
 
@@ -1361,7 +1362,7 @@ vuint32m4_t testuxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vuint32m8_t testuxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) {
+vuint32m8_t test_vluxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) {
   return vluxei32_v_u32m8(base, bindex, vl);
 }
 
@@ -1371,7 +1372,7 @@ vuint32m8_t testuxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vuint32mf2_t testuxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+vuint32mf2_t test_vluxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_u32mf2(base, bindex, vl);
 }
 
@@ -1381,7 +1382,7 @@ vuint32mf2_t testuxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vuint32m1_t testuxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
+vuint32m1_t test_vluxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_u32m1(base, bindex, vl);
 }
 
@@ -1391,7 +1392,7 @@ vuint32m1_t testuxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vuint32m2_t testuxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) {
+vuint32m2_t test_vluxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_u32m2(base, bindex, vl);
 }
 
@@ -1401,7 +1402,7 @@ vuint32m2_t testuxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vuint32m4_t testuxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) {
+vuint32m4_t test_vluxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_u32m4(base, bindex, vl);
 }
 
@@ -1411,7 +1412,7 @@ vuint32m4_t testuxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vuint64m1_t testuxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
+vuint64m1_t test_vluxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_u64m1(base, bindex, vl);
 }
 
@@ -1421,7 +1422,7 @@ vuint64m1_t testuxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vuint64m2_t testuxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
+vuint64m2_t test_vluxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_u64m2(base, bindex, vl);
 }
 
@@ -1431,7 +1432,7 @@ vuint64m2_t testuxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vuint64m4_t testuxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
+vuint64m4_t test_vluxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_u64m4(base, bindex, vl);
 }
 
@@ -1441,7 +1442,7 @@ vuint64m4_t testuxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vuint64m8_t testuxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) {
+vuint64m8_t test_vluxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_u64m8(base, bindex, vl);
 }
 
@@ -1451,7 +1452,7 @@ vuint64m8_t testuxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vuint64m1_t testuxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
+vuint64m1_t test_vluxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_u64m1(base, bindex, vl);
 }
 
@@ -1461,7 +1462,7 @@ vuint64m1_t testuxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vuint64m2_t testuxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
+vuint64m2_t test_vluxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_u64m2(base, bindex, vl);
 }
 
@@ -1471,7 +1472,7 @@ vuint64m2_t testuxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vuint64m4_t testuxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) {
+vuint64m4_t test_vluxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_u64m4(base, bindex, vl);
 }
 
@@ -1481,7 +1482,7 @@ vuint64m4_t testuxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vuint64m8_t testuxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) {
+vuint64m8_t test_vluxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_u64m8(base, bindex, vl);
 }
 
@@ -1491,7 +1492,7 @@ vuint64m8_t testuxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vuint64m1_t testuxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
+vuint64m1_t test_vluxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_u64m1(base, bindex, vl);
 }
 
@@ -1501,7 +1502,7 @@ vuint64m1_t testuxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vuint64m2_t testuxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) {
+vuint64m2_t test_vluxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_u64m2(base, bindex, vl);
 }
 
@@ -1511,7 +1512,7 @@ vuint64m2_t testuxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vuint64m4_t testuxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) {
+vuint64m4_t test_vluxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_u64m4(base, bindex, vl);
 }
 
@@ -1521,7 +1522,7 @@ vuint64m4_t testuxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vuint64m8_t testuxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) {
+vuint64m8_t test_vluxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_u64m8(base, bindex, vl);
 }
 
@@ -1531,7 +1532,7 @@ vuint64m8_t testuxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vuint64m1_t testuxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
+vuint64m1_t test_vluxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_u64m1(base, bindex, vl);
 }
 
@@ -1541,7 +1542,7 @@ vuint64m1_t testuxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vuint64m2_t testuxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) {
+vuint64m2_t test_vluxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_u64m2(base, bindex, vl);
 }
 
@@ -1551,7 +1552,7 @@ vuint64m2_t testuxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vuint64m4_t testuxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) {
+vuint64m4_t test_vluxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_u64m4(base, bindex, vl);
 }
 
@@ -1561,7 +1562,7 @@ vuint64m4_t testuxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vuint64m8_t testuxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) {
+vuint64m8_t test_vluxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_u64m8(base, bindex, vl);
 }
 
@@ -1571,7 +1572,7 @@ vuint64m8_t testuxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
-vfloat32mf2_t testuxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) {
+vfloat32mf2_t test_vluxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_f32mf2(base, bindex, vl);
 }
 
@@ -1581,7 +1582,7 @@ vfloat32mf2_t testuxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
-vfloat32m1_t testuxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) {
+vfloat32m1_t test_vluxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_f32m1(base, bindex, vl);
 }
 
@@ -1591,7 +1592,7 @@ vfloat32m1_t testuxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
-vfloat32m2_t testuxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) {
+vfloat32m2_t test_vluxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_f32m2(base, bindex, vl);
 }
 
@@ -1601,7 +1602,7 @@ vfloat32m2_t testuxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
-vfloat32m4_t testuxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) {
+vfloat32m4_t test_vluxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_f32m4(base, bindex, vl);
 }
 
@@ -1611,7 +1612,7 @@ vfloat32m4_t testuxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
-vfloat32m8_t testuxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) {
+vfloat32m8_t test_vluxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_f32m8(base, bindex, vl);
 }
 
@@ -1621,7 +1622,7 @@ vfloat32m8_t testuxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
-vfloat32mf2_t testuxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) {
+vfloat32mf2_t test_vluxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_f32mf2(base, bindex, vl);
 }
 
@@ -1631,7 +1632,7 @@ vfloat32mf2_t testuxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
-vfloat32m1_t testuxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) {
+vfloat32m1_t test_vluxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_f32m1(base, bindex, vl);
 }
 
@@ -1641,7 +1642,7 @@ vfloat32m1_t testuxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
-vfloat32m2_t testuxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) {
+vfloat32m2_t test_vluxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_f32m2(base, bindex, vl);
 }
 
@@ -1651,7 +1652,7 @@ vfloat32m2_t testuxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
-vfloat32m4_t testuxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) {
+vfloat32m4_t test_vluxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_f32m4(base, bindex, vl);
 }
 
@@ -1661,7 +1662,7 @@ vfloat32m4_t testuxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
-vfloat32m8_t testuxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) {
+vfloat32m8_t test_vluxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) {
   return vluxei16_v_f32m8(base, bindex, vl);
 }
 
@@ -1671,7 +1672,7 @@ vfloat32m8_t testuxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
-vfloat32mf2_t testuxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) {
+vfloat32mf2_t test_vluxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_f32mf2(base, bindex, vl);
 }
 
@@ -1681,7 +1682,7 @@ vfloat32mf2_t testuxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
-vfloat32m1_t testuxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) {
+vfloat32m1_t test_vluxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_f32m1(base, bindex, vl);
 }
 
@@ -1691,7 +1692,7 @@ vfloat32m1_t testuxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
-vfloat32m2_t testuxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) {
+vfloat32m2_t test_vluxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_f32m2(base, bindex, vl);
 }
 
@@ -1701,7 +1702,7 @@ vfloat32m2_t testuxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
-vfloat32m4_t testuxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) {
+vfloat32m4_t test_vluxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_f32m4(base, bindex, vl);
 }
 
@@ -1711,7 +1712,7 @@ vfloat32m4_t testuxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
-vfloat32m8_t testuxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) {
+vfloat32m8_t test_vluxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) {
   return vluxei32_v_f32m8(base, bindex, vl);
 }
 
@@ -1721,7 +1722,7 @@ vfloat32m8_t testuxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
-vfloat32mf2_t testuxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) {
+vfloat32mf2_t test_vluxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_f32mf2(base, bindex, vl);
 }
 
@@ -1731,7 +1732,7 @@ vfloat32mf2_t testuxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
-vfloat32m1_t testuxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) {
+vfloat32m1_t test_vluxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_f32m1(base, bindex, vl);
 }
 
@@ -1741,7 +1742,7 @@ vfloat32m1_t testuxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
-vfloat32m2_t testuxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) {
+vfloat32m2_t test_vluxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_f32m2(base, bindex, vl);
 }
 
@@ -1751,7 +1752,7 @@ vfloat32m2_t testuxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
-vfloat32m4_t testuxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) {
+vfloat32m4_t test_vluxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_f32m4(base, bindex, vl);
 }
 
@@ -1761,7 +1762,7 @@ vfloat32m4_t testuxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
-vfloat64m1_t testuxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) {
+vfloat64m1_t test_vluxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_f64m1(base, bindex, vl);
 }
 
@@ -1771,7 +1772,7 @@ vfloat64m1_t testuxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
-vfloat64m2_t testuxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) {
+vfloat64m2_t test_vluxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_f64m2(base, bindex, vl);
 }
 
@@ -1781,7 +1782,7 @@ vfloat64m2_t testuxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
-vfloat64m4_t testuxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) {
+vfloat64m4_t test_vluxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_f64m4(base, bindex, vl);
 }
 
@@ -1791,7 +1792,7 @@ vfloat64m4_t testuxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
-vfloat64m8_t testuxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) {
+vfloat64m8_t test_vluxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_f64m8(base, bindex, vl);
 }
 
@@ -1801,7 +1802,7 @@ vfloat64m8_t testuxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
-vfloat64m1_t testuxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) {
+vfloat64m1_t test_vluxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_f64m1(base, bindex, vl);
 }
 
@@ -1811,7 +1812,7 @@ vfloat64m1_t testuxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
-vfloat64m2_t testuxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) {
+vfloat64m2_t test_vluxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_f64m2(base, bindex, vl);
 }
 
@@ -1821,7 +1822,7 @@ vfloat64m2_t testuxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
-vfloat64m4_t testuxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) {
+vfloat64m4_t test_vluxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_f64m4(base, bindex, vl);
 }
 
@@ -1831,7 +1832,7 @@ vfloat64m4_t testuxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
-vfloat64m8_t testuxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) {
+vfloat64m8_t test_vluxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_f64m8(base, bindex, vl);
 }
 
@@ -1841,7 +1842,7 @@ vfloat64m8_t testuxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
-vfloat64m1_t testuxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) {
+vfloat64m1_t test_vluxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_f64m1(base, bindex, vl);
 }
 
@@ -1851,7 +1852,7 @@ vfloat64m1_t testuxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
-vfloat64m2_t testuxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) {
+vfloat64m2_t test_vluxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_f64m2(base, bindex, vl);
 }
 
@@ -1861,7 +1862,7 @@ vfloat64m2_t testuxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
-vfloat64m4_t testuxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) {
+vfloat64m4_t test_vluxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_f64m4(base, bindex, vl);
 }
 
@@ -1871,7 +1872,7 @@ vfloat64m4_t testuxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
-vfloat64m8_t testuxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) {
+vfloat64m8_t test_vluxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_f64m8(base, bindex, vl);
 }
 
@@ -1881,7 +1882,7 @@ vfloat64m8_t testuxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
-vfloat64m1_t testuxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) {
+vfloat64m1_t test_vluxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_f64m1(base, bindex, vl);
 }
 
@@ -1891,7 +1892,7 @@ vfloat64m1_t testuxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
-vfloat64m2_t testuxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) {
+vfloat64m2_t test_vluxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_f64m2(base, bindex, vl);
 }
 
@@ -1901,7 +1902,7 @@ vfloat64m2_t testuxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
-vfloat64m4_t testuxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) {
+vfloat64m4_t test_vluxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_f64m4(base, bindex, vl);
 }
 
@@ -1911,7 +1912,7 @@ vfloat64m4_t testuxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
-vfloat64m8_t testuxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) {
+vfloat64m8_t test_vluxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_f64m8(base, bindex, vl);
 }
 
@@ -1921,7 +1922,7 @@ vfloat64m8_t testuxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t v
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vint8mf8_t testuxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+vint8mf8_t test_vluxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -1931,7 +1932,7 @@ vint8mf8_t testuxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vint8mf4_t testuxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
+vint8mf4_t test_vluxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -1941,7 +1942,7 @@ vint8mf4_t testuxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vint8mf2_t testuxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
+vint8mf2_t test_vluxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -1951,7 +1952,7 @@ vint8mf2_t testuxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vint8m1_t testuxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) {
+vint8m1_t test_vluxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -1961,7 +1962,7 @@ vint8m1_t testuxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *b
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vint8m2_t testuxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) {
+vint8m2_t test_vluxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_i8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -1971,7 +1972,7 @@ vint8m2_t testuxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *b
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
-vint8m4_t testuxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) {
+vint8m4_t test_vluxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) {
   return vluxei8_v_i8m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -1981,7 +1982,7 @@ vint8m4_t testuxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *b
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
-vint8m8_t testuxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) {
+vint8m8_t test_vluxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) {
   return vluxei8_v_i8m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -1991,7 +1992,7 @@ vint8m8_t testuxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *b
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vint8mf8_t testuxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+vint8mf8_t test_vluxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2001,7 +2002,7 @@ vint8mf8_t testuxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vint8mf4_t testuxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
+vint8mf4_t test_vluxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2011,7 +2012,7 @@ vint8mf4_t testuxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vint8mf2_t testuxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) {
+vint8mf2_t test_vluxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2021,7 +2022,7 @@ vint8mf2_t testuxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vint8m1_t testuxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) {
+vint8m1_t test_vluxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2031,7 +2032,7 @@ vint8m1_t testuxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vint8m2_t testuxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
+vint8m2_t test_vluxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
   return vluxei16_v_i8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2041,7 +2042,7 @@ vint8m2_t testuxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
-vint8m4_t testuxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
+vint8m4_t test_vluxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
   return vluxei16_v_i8m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2051,7 +2052,7 @@ vint8m4_t testuxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vint8mf8_t testuxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+vint8mf8_t test_vluxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2061,7 +2062,7 @@ vint8mf8_t testuxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vint8mf4_t testuxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+vint8mf4_t test_vluxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2071,7 +2072,7 @@ vint8mf4_t testuxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vint8mf2_t testuxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+vint8mf2_t test_vluxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2081,7 +2082,7 @@ vint8mf2_t testuxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vint8m1_t testuxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+vint8m1_t test_vluxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2091,7 +2092,7 @@ vint8m1_t testuxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vint8m2_t testuxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
+vint8m2_t test_vluxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
   return vluxei32_v_i8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2101,7 +2102,7 @@ vint8m2_t testuxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vint8mf8_t testuxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+vint8mf8_t test_vluxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2111,7 +2112,7 @@ vint8mf8_t testuxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vint8mf4_t testuxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+vint8mf4_t test_vluxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2121,7 +2122,7 @@ vint8mf4_t testuxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vint8mf2_t testuxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+vint8mf2_t test_vluxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2131,7 +2132,7 @@ vint8mf2_t testuxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vint8m1_t testuxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
+vint8m1_t test_vluxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2141,7 +2142,7 @@ vint8m1_t testuxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vint16mf4_t testuxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+vint16mf4_t test_vluxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2151,7 +2152,7 @@ vint16mf4_t testuxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const in
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vint16mf2_t testuxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+vint16mf2_t test_vluxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2161,7 +2162,7 @@ vint16mf2_t testuxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const in
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vint16m1_t testuxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
+vint16m1_t test_vluxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2171,7 +2172,7 @@ vint16m1_t testuxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vint16m2_t testuxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
+vint16m2_t test_vluxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2181,7 +2182,7 @@ vint16m2_t testuxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vint16m4_t testuxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
+vint16m4_t test_vluxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_i16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2191,7 +2192,7 @@ vint16m4_t testuxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
-vint16m8_t testuxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
+vint16m8_t test_vluxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
   return vluxei8_v_i16m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2201,7 +2202,7 @@ vint16m8_t testuxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vint16mf4_t testuxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+vint16mf4_t test_vluxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2211,7 +2212,7 @@ vint16mf4_t testuxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const i
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vint16mf2_t testuxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+vint16mf2_t test_vluxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2221,7 +2222,7 @@ vint16mf2_t testuxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const i
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vint16m1_t testuxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
+vint16m1_t test_vluxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2231,7 +2232,7 @@ vint16m1_t testuxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int1
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vint16m2_t testuxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
+vint16m2_t test_vluxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2241,7 +2242,7 @@ vint16m2_t testuxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vint16m4_t testuxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
+vint16m4_t test_vluxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
   return vluxei16_v_i16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2251,7 +2252,7 @@ vint16m4_t testuxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
-vint16m8_t testuxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
+vint16m8_t test_vluxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
   return vluxei16_v_i16m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2261,7 +2262,7 @@ vint16m8_t testuxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vint16mf4_t testuxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+vint16mf4_t test_vluxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2271,7 +2272,7 @@ vint16mf4_t testuxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const i
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vint16mf2_t testuxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+vint16mf2_t test_vluxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2281,7 +2282,7 @@ vint16mf2_t testuxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const i
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vint16m1_t testuxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
+vint16m1_t test_vluxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2291,7 +2292,7 @@ vint16m1_t testuxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int1
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vint16m2_t testuxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
+vint16m2_t test_vluxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2301,7 +2302,7 @@ vint16m2_t testuxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vint16m4_t testuxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
+vint16m4_t test_vluxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
   return vluxei32_v_i16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2311,7 +2312,7 @@ vint16m4_t testuxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vint16mf4_t testuxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+vint16mf4_t test_vluxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2321,7 +2322,7 @@ vint16mf4_t testuxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const i
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vint16mf2_t testuxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+vint16mf2_t test_vluxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2331,7 +2332,7 @@ vint16mf2_t testuxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const i
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vint16m1_t testuxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+vint16m1_t test_vluxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2341,7 +2342,7 @@ vint16m1_t testuxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int1
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vint16m2_t testuxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
+vint16m2_t test_vluxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2351,7 +2352,7 @@ vint16m2_t testuxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vint32mf2_t testuxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+vint32mf2_t test_vluxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2361,7 +2362,7 @@ vint32mf2_t testuxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const in
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vint32m1_t testuxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+vint32m1_t test_vluxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2371,7 +2372,7 @@ vint32m1_t testuxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vint32m2_t testuxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
+vint32m2_t test_vluxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2381,7 +2382,7 @@ vint32m2_t testuxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vint32m4_t testuxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
+vint32m4_t test_vluxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2391,7 +2392,7 @@ vint32m4_t testuxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vint32m8_t testuxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
+vint32m8_t test_vluxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_i32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2401,7 +2402,7 @@ vint32m8_t testuxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vint32mf2_t testuxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+vint32mf2_t test_vluxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2411,7 +2412,7 @@ vint32mf2_t testuxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const i
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vint32m1_t testuxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
+vint32m1_t test_vluxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2421,7 +2422,7 @@ vint32m1_t testuxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int3
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vint32m2_t testuxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
+vint32m2_t test_vluxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2431,7 +2432,7 @@ vint32m2_t testuxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int3
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vint32m4_t testuxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
+vint32m4_t test_vluxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2441,7 +2442,7 @@ vint32m4_t testuxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vint32m8_t testuxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
+vint32m8_t test_vluxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
   return vluxei16_v_i32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2451,7 +2452,7 @@ vint32m8_t testuxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vint32mf2_t testuxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+vint32mf2_t test_vluxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2461,7 +2462,7 @@ vint32mf2_t testuxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const i
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vint32m1_t testuxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
+vint32m1_t test_vluxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2471,7 +2472,7 @@ vint32m1_t testuxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int3
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vint32m2_t testuxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
+vint32m2_t test_vluxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2481,7 +2482,7 @@ vint32m2_t testuxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int3
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vint32m4_t testuxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
+vint32m4_t test_vluxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2491,7 +2492,7 @@ vint32m4_t testuxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vint32m8_t testuxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
+vint32m8_t test_vluxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
   return vluxei32_v_i32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2501,7 +2502,7 @@ vint32m8_t testuxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vint32mf2_t testuxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+vint32mf2_t test_vluxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2511,7 +2512,7 @@ vint32mf2_t testuxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const i
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vint32m1_t testuxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
+vint32m1_t test_vluxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2521,7 +2522,7 @@ vint32m1_t testuxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int3
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vint32m2_t testuxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
+vint32m2_t test_vluxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2531,7 +2532,7 @@ vint32m2_t testuxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int3
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vint32m4_t testuxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
+vint32m4_t test_vluxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2541,7 +2542,7 @@ vint32m4_t testuxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vint64m1_t testuxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
+vint64m1_t test_vluxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2551,7 +2552,7 @@ vint64m1_t testuxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vint64m2_t testuxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
+vint64m2_t test_vluxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2561,7 +2562,7 @@ vint64m2_t testuxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vint64m4_t testuxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
+vint64m4_t test_vluxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2571,7 +2572,7 @@ vint64m4_t testuxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vint64m8_t testuxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
+vint64m8_t test_vluxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2581,7 +2582,7 @@ vint64m8_t testuxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vint64m1_t testuxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
+vint64m1_t test_vluxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2591,7 +2592,7 @@ vint64m1_t testuxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int6
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vint64m2_t testuxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
+vint64m2_t test_vluxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2601,7 +2602,7 @@ vint64m2_t testuxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int6
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vint64m4_t testuxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
+vint64m4_t test_vluxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2611,7 +2612,7 @@ vint64m4_t testuxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int6
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vint64m8_t testuxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
+vint64m8_t test_vluxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2621,7 +2622,7 @@ vint64m8_t testuxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vint64m1_t testuxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
+vint64m1_t test_vluxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2631,7 +2632,7 @@ vint64m1_t testuxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int6
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vint64m2_t testuxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
+vint64m2_t test_vluxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2641,7 +2642,7 @@ vint64m2_t testuxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int6
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vint64m4_t testuxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
+vint64m4_t test_vluxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2651,7 +2652,7 @@ vint64m4_t testuxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int6
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vint64m8_t testuxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
+vint64m8_t test_vluxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2661,7 +2662,7 @@ vint64m8_t testuxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vint64m1_t testuxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
+vint64m1_t test_vluxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2671,7 +2672,7 @@ vint64m1_t testuxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int6
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vint64m2_t testuxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
+vint64m2_t test_vluxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2681,7 +2682,7 @@ vint64m2_t testuxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int6
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vint64m4_t testuxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
+vint64m4_t test_vluxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2691,7 +2692,7 @@ vint64m4_t testuxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int6
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vint64m8_t testuxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
+vint64m8_t test_vluxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2701,7 +2702,7 @@ vint64m8_t testuxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vuint8mf8_t testuxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+vuint8mf8_t test_vluxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2711,7 +2712,7 @@ vuint8mf8_t testuxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vuint8mf4_t testuxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+vuint8mf4_t test_vluxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2721,7 +2722,7 @@ vuint8mf4_t testuxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vuint8mf2_t testuxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+vuint8mf2_t test_vluxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2731,7 +2732,7 @@ vuint8mf2_t testuxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vuint8m1_t testuxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
+vuint8m1_t test_vluxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2741,7 +2742,7 @@ vuint8m1_t testuxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vuint8m2_t testuxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
+vuint8m2_t test_vluxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_u8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2751,7 +2752,7 @@ vuint8m2_t testuxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
-vuint8m4_t testuxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
+vuint8m4_t test_vluxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
   return vluxei8_v_u8m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2761,7 +2762,7 @@ vuint8m4_t testuxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
-vuint8m8_t testuxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
+vuint8m8_t test_vluxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
   return vluxei8_v_u8m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2771,7 +2772,7 @@ vuint8m8_t testuxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vuint8mf8_t testuxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+vuint8mf8_t test_vluxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2781,7 +2782,7 @@ vuint8mf8_t testuxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vuint8mf4_t testuxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+vuint8mf4_t test_vluxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2791,7 +2792,7 @@ vuint8mf4_t testuxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vuint8mf2_t testuxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+vuint8mf2_t test_vluxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2801,7 +2802,7 @@ vuint8mf2_t testuxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vuint8m1_t testuxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
+vuint8m1_t test_vluxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2811,7 +2812,7 @@ vuint8m1_t testuxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vuint8m2_t testuxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
+vuint8m2_t test_vluxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
   return vluxei16_v_u8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2821,7 +2822,7 @@ vuint8m2_t testuxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
-vuint8m4_t testuxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
+vuint8m4_t test_vluxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
   return vluxei16_v_u8m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2831,7 +2832,7 @@ vuint8m4_t testuxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vuint8mf8_t testuxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+vuint8mf8_t test_vluxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2841,7 +2842,7 @@ vuint8mf8_t testuxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vuint8mf4_t testuxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+vuint8mf4_t test_vluxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2851,7 +2852,7 @@ vuint8mf4_t testuxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vuint8mf2_t testuxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+vuint8mf2_t test_vluxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2861,7 +2862,7 @@ vuint8mf2_t testuxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vuint8m1_t testuxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+vuint8m1_t test_vluxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2871,7 +2872,7 @@ vuint8m1_t testuxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vuint8m2_t testuxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+vuint8m2_t test_vluxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
   return vluxei32_v_u8m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2881,7 +2882,7 @@ vuint8m2_t testuxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vuint8mf8_t testuxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+vuint8mf8_t test_vluxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2891,7 +2892,7 @@ vuint8mf8_t testuxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vuint8mf4_t testuxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+vuint8mf4_t test_vluxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2901,7 +2902,7 @@ vuint8mf4_t testuxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vuint8mf2_t testuxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+vuint8mf2_t test_vluxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2911,7 +2912,7 @@ vuint8mf2_t testuxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vuint8m1_t testuxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
+vuint8m1_t test_vluxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2921,7 +2922,7 @@ vuint8m1_t testuxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vuint16mf4_t testuxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+vuint16mf4_t test_vluxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2931,7 +2932,7 @@ vuint16mf4_t testuxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vuint16mf2_t testuxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+vuint16mf2_t test_vluxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2941,7 +2942,7 @@ vuint16mf2_t testuxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vuint16m1_t testuxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
+vuint16m1_t test_vluxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2951,7 +2952,7 @@ vuint16m1_t testuxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vuint16m2_t testuxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
+vuint16m2_t test_vluxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2961,7 +2962,7 @@ vuint16m2_t testuxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vuint16m4_t testuxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
+vuint16m4_t test_vluxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_u16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2971,7 +2972,7 @@ vuint16m4_t testuxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
-vuint16m8_t testuxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
+vuint16m8_t test_vluxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
   return vluxei8_v_u16m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2981,7 +2982,7 @@ vuint16m8_t testuxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vuint16mf4_t testuxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+vuint16mf4_t test_vluxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -2991,7 +2992,7 @@ vuint16mf4_t testuxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vuint16mf2_t testuxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+vuint16mf2_t test_vluxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3001,7 +3002,7 @@ vuint16mf2_t testuxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vuint16m1_t testuxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
+vuint16m1_t test_vluxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3011,7 +3012,7 @@ vuint16m1_t testuxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vuint16m2_t testuxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
+vuint16m2_t test_vluxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3021,7 +3022,7 @@ vuint16m2_t testuxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vuint16m4_t testuxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
+vuint16m4_t test_vluxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
   return vluxei16_v_u16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3031,7 +3032,7 @@ vuint16m4_t testuxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
-vuint16m8_t testuxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
+vuint16m8_t test_vluxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
   return vluxei16_v_u16m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3041,7 +3042,7 @@ vuint16m8_t testuxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vuint16mf4_t testuxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+vuint16mf4_t test_vluxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3051,7 +3052,7 @@ vuint16mf4_t testuxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vuint16mf2_t testuxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+vuint16mf2_t test_vluxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3061,7 +3062,7 @@ vuint16mf2_t testuxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vuint16m1_t testuxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
+vuint16m1_t test_vluxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3071,7 +3072,7 @@ vuint16m1_t testuxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vuint16m2_t testuxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
+vuint16m2_t test_vluxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3081,7 +3082,7 @@ vuint16m2_t testuxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vuint16m4_t testuxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
+vuint16m4_t test_vluxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
   return vluxei32_v_u16m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3091,7 +3092,7 @@ vuint16m4_t testuxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vuint16mf4_t testuxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+vuint16mf4_t test_vluxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3101,7 +3102,7 @@ vuint16mf4_t testuxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vuint16mf2_t testuxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+vuint16mf2_t test_vluxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3111,7 +3112,7 @@ vuint16mf2_t testuxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vuint16m1_t testuxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
+vuint16m1_t test_vluxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3121,7 +3122,7 @@ vuint16m1_t testuxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vuint16m2_t testuxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
+vuint16m2_t test_vluxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3131,7 +3132,7 @@ vuint16m2_t testuxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vuint32mf2_t testuxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+vuint32mf2_t test_vluxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3141,7 +3142,7 @@ vuint32mf2_t testuxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vuint32m1_t testuxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+vuint32m1_t test_vluxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3151,7 +3152,7 @@ vuint32m1_t testuxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vuint32m2_t testuxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
+vuint32m2_t test_vluxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3161,7 +3162,7 @@ vuint32m2_t testuxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vuint32m4_t testuxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
+vuint32m4_t test_vluxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3171,7 +3172,7 @@ vuint32m4_t testuxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vuint32m8_t testuxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
+vuint32m8_t test_vluxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_u32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3181,7 +3182,7 @@ vuint32m8_t testuxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vuint32mf2_t testuxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+vuint32mf2_t test_vluxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3191,7 +3192,7 @@ vuint32mf2_t testuxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vuint32m1_t testuxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
+vuint32m1_t test_vluxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3201,7 +3202,7 @@ vuint32m1_t testuxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vuint32m2_t testuxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
+vuint32m2_t test_vluxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3211,7 +3212,7 @@ vuint32m2_t testuxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vuint32m4_t testuxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
+vuint32m4_t test_vluxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3221,7 +3222,7 @@ vuint32m4_t testuxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vuint32m8_t testuxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
+vuint32m8_t test_vluxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
   return vluxei16_v_u32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3231,7 +3232,7 @@ vuint32m8_t testuxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vuint32mf2_t testuxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+vuint32mf2_t test_vluxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3241,7 +3242,7 @@ vuint32mf2_t testuxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vuint32m1_t testuxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
+vuint32m1_t test_vluxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3251,7 +3252,7 @@ vuint32m1_t testuxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vuint32m2_t testuxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
+vuint32m2_t test_vluxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3261,7 +3262,7 @@ vuint32m2_t testuxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vuint32m4_t testuxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
+vuint32m4_t test_vluxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3271,7 +3272,7 @@ vuint32m4_t testuxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vuint32m8_t testuxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
+vuint32m8_t test_vluxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
   return vluxei32_v_u32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3281,7 +3282,7 @@ vuint32m8_t testuxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vuint32mf2_t testuxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+vuint32mf2_t test_vluxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3291,7 +3292,7 @@ vuint32mf2_t testuxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vuint32m1_t testuxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
+vuint32m1_t test_vluxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3301,7 +3302,7 @@ vuint32m1_t testuxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vuint32m2_t testuxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
+vuint32m2_t test_vluxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3311,7 +3312,7 @@ vuint32m2_t testuxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vuint32m4_t testuxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
+vuint32m4_t test_vluxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3321,7 +3322,7 @@ vuint32m4_t testuxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vuint64m1_t testuxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
+vuint64m1_t test_vluxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3331,7 +3332,7 @@ vuint64m1_t testuxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vuint64m2_t testuxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
+vuint64m2_t test_vluxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3341,7 +3342,7 @@ vuint64m2_t testuxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vuint64m4_t testuxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
+vuint64m4_t test_vluxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3351,7 +3352,7 @@ vuint64m4_t testuxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vuint64m8_t testuxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
+vuint64m8_t test_vluxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3361,7 +3362,7 @@ vuint64m8_t testuxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vuint64m1_t testuxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
+vuint64m1_t test_vluxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3371,7 +3372,7 @@ vuint64m1_t testuxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vuint64m2_t testuxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
+vuint64m2_t test_vluxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3381,7 +3382,7 @@ vuint64m2_t testuxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vuint64m4_t testuxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
+vuint64m4_t test_vluxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3391,7 +3392,7 @@ vuint64m4_t testuxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vuint64m8_t testuxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
+vuint64m8_t test_vluxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3401,7 +3402,7 @@ vuint64m8_t testuxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vuint64m1_t testuxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
+vuint64m1_t test_vluxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3411,7 +3412,7 @@ vuint64m1_t testuxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vuint64m2_t testuxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
+vuint64m2_t test_vluxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3421,7 +3422,7 @@ vuint64m2_t testuxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vuint64m4_t testuxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
+vuint64m4_t test_vluxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3431,7 +3432,7 @@ vuint64m4_t testuxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vuint64m8_t testuxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
+vuint64m8_t test_vluxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3441,7 +3442,7 @@ vuint64m8_t testuxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vuint64m1_t testuxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
+vuint64m1_t test_vluxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3451,7 +3452,7 @@ vuint64m1_t testuxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vuint64m2_t testuxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
+vuint64m2_t test_vluxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3461,7 +3462,7 @@ vuint64m2_t testuxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vuint64m4_t testuxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
+vuint64m4_t test_vluxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3471,7 +3472,7 @@ vuint64m4_t testuxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const ui
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vuint64m8_t testuxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
+vuint64m8_t test_vluxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3481,7 +3482,7 @@ vuint64m8_t testuxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uin
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
-vfloat32mf2_t testuxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) {
+vfloat32mf2_t test_vluxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3491,7 +3492,7 @@ vfloat32mf2_t testuxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, cons
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
-vfloat32m1_t testuxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) {
+vfloat32m1_t test_vluxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3501,7 +3502,7 @@ vfloat32m1_t testuxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const f
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
-vfloat32m2_t testuxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) {
+vfloat32m2_t test_vluxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3511,7 +3512,7 @@ vfloat32m2_t testuxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const f
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
-vfloat32m4_t testuxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) {
+vfloat32m4_t test_vluxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3521,7 +3522,7 @@ vfloat32m4_t testuxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const fl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
-vfloat32m8_t testuxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) {
+vfloat32m8_t test_vluxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_f32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3531,7 +3532,7 @@ vfloat32m8_t testuxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const fl
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
-vfloat32mf2_t testuxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) {
+vfloat32mf2_t test_vluxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3541,7 +3542,7 @@ vfloat32mf2_t testuxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, con
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
-vfloat32m1_t testuxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) {
+vfloat32m1_t test_vluxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3551,7 +3552,7 @@ vfloat32m1_t testuxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
-vfloat32m2_t testuxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) {
+vfloat32m2_t test_vluxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3561,7 +3562,7 @@ vfloat32m2_t testuxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
-vfloat32m4_t testuxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) {
+vfloat32m4_t test_vluxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3571,7 +3572,7 @@ vfloat32m4_t testuxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const f
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
-vfloat32m8_t testuxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) {
+vfloat32m8_t test_vluxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) {
   return vluxei16_v_f32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3581,7 +3582,7 @@ vfloat32m8_t testuxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const f
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
-vfloat32mf2_t testuxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) {
+vfloat32mf2_t test_vluxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3591,7 +3592,7 @@ vfloat32mf2_t testuxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, con
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
-vfloat32m1_t testuxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) {
+vfloat32m1_t test_vluxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3601,7 +3602,7 @@ vfloat32m1_t testuxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
-vfloat32m2_t testuxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) {
+vfloat32m2_t test_vluxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3611,7 +3612,7 @@ vfloat32m2_t testuxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
-vfloat32m4_t testuxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) {
+vfloat32m4_t test_vluxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3621,7 +3622,7 @@ vfloat32m4_t testuxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const f
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
-vfloat32m8_t testuxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) {
+vfloat32m8_t test_vluxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) {
   return vluxei32_v_f32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3631,7 +3632,7 @@ vfloat32m8_t testuxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const f
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
-vfloat32mf2_t testuxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) {
+vfloat32mf2_t test_vluxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3641,7 +3642,7 @@ vfloat32mf2_t testuxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, con
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
-vfloat32m1_t testuxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) {
+vfloat32m1_t test_vluxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3651,7 +3652,7 @@ vfloat32m1_t testuxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
-vfloat32m2_t testuxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) {
+vfloat32m2_t test_vluxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3661,7 +3662,7 @@ vfloat32m2_t testuxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
-vfloat32m4_t testuxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) {
+vfloat32m4_t test_vluxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3671,7 +3672,7 @@ vfloat32m4_t testuxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const f
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
-vfloat64m1_t testuxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) {
+vfloat64m1_t test_vluxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3681,7 +3682,7 @@ vfloat64m1_t testuxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const d
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
-vfloat64m2_t testuxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) {
+vfloat64m2_t test_vluxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3691,7 +3692,7 @@ vfloat64m2_t testuxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const d
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
-vfloat64m4_t testuxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) {
+vfloat64m4_t test_vluxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3701,7 +3702,7 @@ vfloat64m4_t testuxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const d
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
-vfloat64m8_t testuxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) {
+vfloat64m8_t test_vluxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3711,7 +3712,7 @@ vfloat64m8_t testuxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const do
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
-vfloat64m1_t testuxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) {
+vfloat64m1_t test_vluxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3721,7 +3722,7 @@ vfloat64m1_t testuxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
-vfloat64m2_t testuxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) {
+vfloat64m2_t test_vluxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3731,7 +3732,7 @@ vfloat64m2_t testuxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
-vfloat64m4_t testuxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) {
+vfloat64m4_t test_vluxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3741,7 +3742,7 @@ vfloat64m4_t testuxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
-vfloat64m8_t testuxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) {
+vfloat64m8_t test_vluxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3751,7 +3752,7 @@ vfloat64m8_t testuxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const d
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
-vfloat64m1_t testuxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) {
+vfloat64m1_t test_vluxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3761,7 +3762,7 @@ vfloat64m1_t testuxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
-vfloat64m2_t testuxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) {
+vfloat64m2_t test_vluxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3771,7 +3772,7 @@ vfloat64m2_t testuxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
-vfloat64m4_t testuxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) {
+vfloat64m4_t test_vluxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3781,7 +3782,7 @@ vfloat64m4_t testuxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
-vfloat64m8_t testuxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) {
+vfloat64m8_t test_vluxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3791,7 +3792,7 @@ vfloat64m8_t testuxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const d
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
-vfloat64m1_t testuxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
+vfloat64m1_t test_vluxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3801,7 +3802,7 @@ vfloat64m1_t testuxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
-vfloat64m2_t testuxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
+vfloat64m2_t test_vluxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3811,7 +3812,7 @@ vfloat64m2_t testuxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
-vfloat64m4_t testuxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
+vfloat64m4_t test_vluxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
@@ -3821,6 +3822,427 @@ vfloat64m4_t testuxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
-vfloat64m8_t testuxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
+vfloat64m8_t test_vluxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8.i64(<vscale x 1 x half>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vluxei8_v_f16mf4 (const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxei8_v_f16mf4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8.i64(<vscale x 2 x half>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vluxei8_v_f16mf2 (const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxei8_v_f16mf2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i8.i64(<vscale x 4 x half>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vluxei8_v_f16m1 (const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxei8_v_f16m1 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i8.i64(<vscale x 8 x half>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vluxei8_v_f16m2 (const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+  return vluxei8_v_f16m2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i8.i64(<vscale x 16 x half>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
+//
+vfloat16m4_t test_vluxei8_v_f16m4 (const _Float16 *base, vuint8m2_t bindex, size_t vl) {
+  return vluxei8_v_f16m4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i8.i64(<vscale x 32 x half>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
+//
+vfloat16m8_t test_vluxei8_v_f16m8 (const _Float16 *base, vuint8m4_t bindex, size_t vl) {
+  return vluxei8_v_f16m8 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i16.i64(<vscale x 1 x half>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vluxei16_v_f16mf4 (const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxei16_v_f16mf4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i16.i64(<vscale x 2 x half>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vluxei16_v_f16mf2 (const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxei16_v_f16mf2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i16.i64(<vscale x 4 x half>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vluxei16_v_f16m1 (const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vluxei16_v_f16m1 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i16.i64(<vscale x 8 x half>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vluxei16_v_f16m2 (const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vluxei16_v_f16m2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i16.i64(<vscale x 16 x half>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
+//
+vfloat16m4_t test_vluxei16_v_f16m4 (const _Float16 *base, vuint16m4_t bindex, size_t vl) {
+  return vluxei16_v_f16m4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i16.i64(<vscale x 32 x half>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
+//
+vfloat16m8_t test_vluxei16_v_f16m8 (const _Float16 *base, vuint16m8_t bindex, size_t vl) {
+  return vluxei16_v_f16m8 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32.i64(<vscale x 1 x half>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vluxei32_v_f16mf4 (const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxei32_v_f16mf4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32.i64(<vscale x 2 x half>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vluxei32_v_f16mf2 (const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vluxei32_v_f16mf2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32.i64(<vscale x 4 x half>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vluxei32_v_f16m1 (const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vluxei32_v_f16m1 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i32.i64(<vscale x 8 x half>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vluxei32_v_f16m2 (const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vluxei32_v_f16m2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32.i64(<vscale x 16 x half>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
+//
+vfloat16m4_t test_vluxei32_v_f16m4 (const _Float16 *base, vuint32m8_t bindex, size_t vl) {
+  return vluxei32_v_f16m4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i64.i64(<vscale x 1 x half>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vluxei64_v_f16mf4 (const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vluxei64_v_f16mf4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i64.i64(<vscale x 2 x half>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vluxei64_v_f16mf2 (const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxei64_v_f16mf2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i64.i64(<vscale x 4 x half>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vluxei64_v_f16m1 (const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxei64_v_f16m1 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i64.i64(<vscale x 8 x half>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vluxei64_v_f16m2 (const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vluxei64_v_f16m2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vluxei8_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxei8_v_f16mf4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vluxei8_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxei8_v_f16mf2_m (mask, maskedoff, base, bindex, vl);
+}
+
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vluxei8_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxei8_v_f16m1_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vluxei8_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+  return vluxei8_v_f16m2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
+//
+vfloat16m4_t test_vluxei8_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
+  return vluxei8_v_f16m4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
+//
+vfloat16m8_t test_vluxei8_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) {
+  return vluxei8_v_f16m8_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vluxei16_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxei16_v_f16mf4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vluxei16_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxei16_v_f16mf2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vluxei16_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vluxei16_v_f16m1_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vluxei16_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vluxei16_v_f16m2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
+//
+vfloat16m4_t test_vluxei16_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
+  return vluxei16_v_f16m4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
+//
+vfloat16m8_t test_vluxei16_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) {
+  return vluxei16_v_f16m8_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vluxei32_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxei32_v_f16mf4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vluxei32_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vluxei32_v_f16mf2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vluxei32_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vluxei32_v_f16m1_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vluxei32_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vluxei32_v_f16m2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
+//
+vfloat16m4_t test_vluxei32_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
+  return vluxei32_v_f16m4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vluxei64_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vluxei64_v_f16mf4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vluxei64_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxei64_v_f16mf2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vluxei64_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxei64_v_f16m1_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vluxei64_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vluxei64_v_f16m2_m (mask, maskedoff, base, bindex, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg.c
index 29f27ea1e6b8..29c1122dcc94 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg.c
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
-// RUN:   -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -target-feature +experimental-v \
 // RUN:   -target-feature +experimental-zvlsseg -disable-O0-optnone -emit-llvm %s \
 // RUN:   -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
@@ -28783,3 +28783,3632 @@ void test_vluxseg2ei64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t
   return vluxseg2ei64_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg2.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg2ei8_v_f16mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg3.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg3ei8_v_f16mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg4.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg4ei8_v_f16mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg5.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg5ei8_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg6.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg6ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg7.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg7ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg8.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg8ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg2.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg2ei8_v_f16mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg3.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg3ei8_v_f16mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg4.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg4ei8_v_f16mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg5.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg5ei8_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg6.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg6ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg7.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg7ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg8.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg8ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg2.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg2ei8_v_f16m1(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg3.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg3ei8_v_f16m1(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg4.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg4ei8_v_f16m1(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg5.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg5ei8_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg6.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg6ei8_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg7.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg7ei8_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg8.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg8ei8_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg2.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+  return vluxseg2ei8_v_f16m2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg3.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+  return vluxseg3ei8_v_f16m2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg4.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+  return vluxseg4ei8_v_f16m2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vluxseg2.nxv16f16.nxv16i8.i64(half* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
+  return vluxseg2ei8_v_f16m4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg2.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg2ei16_v_f16mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg3.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg3ei16_v_f16mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg4.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg4ei16_v_f16mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg5.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg5ei16_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg6.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg6ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg7.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg7ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg8.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg8ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg2.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg2ei16_v_f16mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg3.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg3ei16_v_f16mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg4.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg4ei16_v_f16mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg5.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg5ei16_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg6.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg6ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg7.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg7ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg8.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg8ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg2.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg2ei16_v_f16m1(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg3.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg3ei16_v_f16m1(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg4.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg4ei16_v_f16m1(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg5.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg5ei16_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg6.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg6ei16_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg7.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg7ei16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg8.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg8ei16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg2.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vluxseg2ei16_v_f16m2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg3.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vluxseg3ei16_v_f16m2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg4.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vluxseg4ei16_v_f16m2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vluxseg2.nxv16f16.nxv16i16.i64(half* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
+  return vluxseg2ei16_v_f16m4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg2.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg2ei32_v_f16mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg3.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg3ei32_v_f16mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg4.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg4ei32_v_f16mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg5.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg5ei32_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg6.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg6ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg7.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg7ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg8.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg8ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg2.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg2ei32_v_f16mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg3.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg3ei32_v_f16mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg4.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg4ei32_v_f16mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg5.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg5ei32_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg6.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg6ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg7.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg7ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg8.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg8ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg2.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg2ei32_v_f16m1(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg3.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg3ei32_v_f16m1(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg4.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg4ei32_v_f16m1(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg5.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg5ei32_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg6.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg6ei32_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg7.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg7ei32_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg8.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg8ei32_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg2.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vluxseg2ei32_v_f16m2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg3.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vluxseg3ei32_v_f16m2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg4.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vluxseg4ei32_v_f16m2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vluxseg2.nxv16f16.nxv16i32.i64(half* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
+  return vluxseg2ei32_v_f16m4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg2.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg2ei64_v_f16mf4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg3.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg3ei64_v_f16mf4(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg4.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg4ei64_v_f16mf4(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg5.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg5ei64_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg6.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg6ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg7.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg7ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg8.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg8ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg2.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg2ei64_v_f16mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg3.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg3ei64_v_f16mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg4.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg4ei64_v_f16mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg5.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg5ei64_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg6.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg6ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg7.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg7ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg8.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg8ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg2.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg2ei64_v_f16m1(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg3.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg3ei64_v_f16m1(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg4.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg4ei64_v_f16m1(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg5.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg5ei64_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg6.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg6ei64_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg7.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg7ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg8.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg8ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg2.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vluxseg2ei64_v_f16m2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg3.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vluxseg3ei64_v_f16m2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg4.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vluxseg4ei64_v_f16m2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg2ei8_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg3ei8_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg4ei8_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg5ei8_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg6ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg7ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], <vscale x 1 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg8ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg2ei8_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg3ei8_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg4ei8_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg5ei8_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg6ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg7ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], <vscale x 2 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg8ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg2ei8_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg3ei8_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg4ei8_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg5ei8_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg6ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg7ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], <vscale x 4 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg8ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+  return vluxseg2ei8_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+  return vluxseg3ei8_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+  return vluxseg4ei8_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
+  return vluxseg2ei8_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg2ei16_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg3ei16_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg4ei16_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg5ei16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg6ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg7ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], <vscale x 1 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg8ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg2ei16_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg3ei16_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg4ei16_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg5ei16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg6ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg7ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], <vscale x 2 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg8ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg2ei16_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg3ei16_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg4ei16_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg5ei16_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg6ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg7ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], <vscale x 4 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg8ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vluxseg2ei16_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vluxseg3ei16_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vluxseg4ei16_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16.i64(<vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
+  return vluxseg2ei16_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg2ei32_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg3ei32_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg4ei32_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg5ei32_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg6ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg7ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], <vscale x 1 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg8ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg2ei32_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg3ei32_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg4ei32_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg5ei32_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg6ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg7ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], <vscale x 2 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg8ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg2ei32_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg3ei32_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg4ei32_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg5ei32_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg6ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg7ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], <vscale x 4 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg8ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vluxseg2ei32_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei32_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vluxseg3ei32_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei32_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vluxseg4ei32_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32.i64(<vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
+  return vluxseg2ei32_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg2ei64_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg3ei64_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg4ei64_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg5ei64_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg6ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg7ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], <vscale x 1 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg8ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg2ei64_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg3ei64_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg4ei64_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg5ei64_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg6ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg7ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], <vscale x 2 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg8ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg2ei64_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg3ei64_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg4ei64_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg5ei64_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg6ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg7ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], <vscale x 4 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg8ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vluxseg2ei64_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei64_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vluxseg3ei64_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei64_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vluxseg4ei64_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c
index e9675dbcece6..d1ba4b1bd26b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -974,3 +975,57 @@ vfloat64m8_t test_vmerge_vvm_f64m8(vbool8_t mask, vfloat64m8_t op1,
                                    vfloat64m8_t op2, size_t vl) {
   return vmerge_vvm_f64m8(mask, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vmerge_vvm_f16mf4 (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vmerge_vvm_f16mf4(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vmerge_vvm_f16mf2 (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vmerge_vvm_f16mf2(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vmerge_vvm_f16m1 (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vmerge_vvm_f16m1(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vmerge_vvm_f16m2 (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vmerge_vvm_f16m2(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vmerge_vvm_f16m4 (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vmerge_vvm_f16m4(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vmerge_vvm_f16m8 (vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vmerge_vvm_f16m8(mask, op1, op2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c
index b40e35b8a12d..413ec6c84761 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -361,3 +362,219 @@ vbool8_t test_vmfeq_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
                                   vfloat64m8_t op1, double op2, size_t vl) {
   return vmfeq_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf4_b64(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_f16mf4_b64 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vmfeq_vv_f16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf4_b64(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_f16mf4_b64 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vmfeq_vf_f16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf2_b32(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_f16mf2_b32 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vmfeq_vv_f16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf2_b32(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_f16mf2_b32 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vmfeq_vf_f16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m1_b16(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_f16m1_b16 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vmfeq_vv_f16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m1_b16(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_f16m1_b16 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vmfeq_vf_f16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m2_b8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_f16m2_b8 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vmfeq_vv_f16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m2_b8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_f16m2_b8 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vmfeq_vf_f16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m4_b4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_f16m4_b4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vmfeq_vv_f16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m4_b4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_f16m4_b4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vmfeq_vf_f16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m8_b2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_f16m8_b2 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vmfeq_vv_f16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m8_b2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_f16m8_b2 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vmfeq_vf_f16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf4_b64_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vmfeq_vv_f16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf4_b64_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16.f16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vmfeq_vf_f16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf2_b32_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vmfeq_vv_f16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf2_b32_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16.f16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vmfeq_vf_f16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m1_b16_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vmfeq_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m1_b16_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16.f16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vmfeq_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m2_b8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vmfeq_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m2_b8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16.f16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vmfeq_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m4_b4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vmfeq_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m4_b4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16.f16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vmfeq_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m8_b2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vmfeq_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m8_b2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32f16.f16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vmfeq_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c
index f8592ca28d72..8a7a36260920 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -329,3 +330,218 @@ vbool8_t test_vmfge_vf_f64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat64m8
   return vmfge_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vmfge_vv_f16mf4_b64(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_f16mf4_b64 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vmfge_vv_f16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vf_f16mf4_b64(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_f16mf4_b64 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vmfge_vf_f16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vv_f16mf2_b32(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_f16mf2_b32 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vmfge_vv_f16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vf_f16mf2_b32(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_f16mf2_b32 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vmfge_vf_f16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vv_f16m1_b16(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_f16m1_b16 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vmfge_vv_f16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vf_f16m1_b16(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_f16m1_b16 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vmfge_vf_f16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vv_f16m2_b8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_f16m2_b8 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vmfge_vv_f16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vf_f16m2_b8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_f16m2_b8 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vmfge_vf_f16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vv_f16m4_b4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_f16m4_b4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vmfge_vv_f16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vf_f16m4_b4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_f16m4_b4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vmfge_vf_f16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vv_f16m8_b2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_f16m8_b2 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vmfge_vv_f16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vf_f16m8_b2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_f16m8_b2 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vmfge_vf_f16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vv_f16mf4_b64_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vmfge_vv_f16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vf_f16mf4_b64_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16.f16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vmfge_vf_f16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vv_f16mf2_b32_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vmfge_vv_f16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vf_f16mf2_b32_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16.f16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vmfge_vf_f16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vv_f16m1_b16_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vmfge_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vf_f16m1_b16_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16.f16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vmfge_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vv_f16m2_b8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vmfge_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vf_f16m2_b8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16.f16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vmfge_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vv_f16m4_b4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vmfge_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vf_f16m4_b4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16.f16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vmfge_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vv_f16m8_b2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vmfge_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfge_vf_f16m8_b2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32f16.f16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vmfge_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c
index 802c6d1f4389..0e9dbd463dd7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -329,3 +330,218 @@ vbool8_t test_vmfgt_vf_f64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat64m8
   return vmfgt_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf4_b64(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_f16mf4_b64 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vmfgt_vv_f16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf4_b64(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_f16mf4_b64 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vmfgt_vf_f16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf2_b32(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_f16mf2_b32 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vmfgt_vv_f16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf2_b32(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_f16mf2_b32 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vmfgt_vf_f16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m1_b16(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_f16m1_b16 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vmfgt_vv_f16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m1_b16(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_f16m1_b16 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vmfgt_vf_f16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m2_b8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_f16m2_b8 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vmfgt_vv_f16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m2_b8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_f16m2_b8 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vmfgt_vf_f16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m4_b4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_f16m4_b4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vmfgt_vv_f16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m4_b4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_f16m4_b4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vmfgt_vf_f16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m8_b2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_f16m8_b2 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vmfgt_vv_f16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m8_b2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_f16m8_b2 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vmfgt_vf_f16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf4_b64_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vmfgt_vv_f16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf4_b64_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16.f16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vmfgt_vf_f16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf2_b32_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vmfgt_vv_f16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf2_b32_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16.f16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vmfgt_vf_f16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m1_b16_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vmfgt_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m1_b16_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16.f16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vmfgt_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m2_b8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vmfgt_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m2_b8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16.f16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vmfgt_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m4_b4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vmfgt_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m4_b4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16.f16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vmfgt_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m8_b2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vmfgt_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m8_b2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32f16.f16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vmfgt_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c
index 5aee3d0b7118..77fac48abeef 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -361,3 +362,219 @@ vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
                                   vfloat64m8_t op1, double op2, size_t vl) {
   return vmfle_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmfle_vv_f16mf4_b64(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_f16mf4_b64 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vmfle_vv_f16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vf_f16mf4_b64(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_f16mf4_b64 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vmfle_vf_f16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vv_f16mf2_b32(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_f16mf2_b32 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vmfle_vv_f16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vf_f16mf2_b32(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_f16mf2_b32 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vmfle_vf_f16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vv_f16m1_b16(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_f16m1_b16 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vmfle_vv_f16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vf_f16m1_b16(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_f16m1_b16 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vmfle_vf_f16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vv_f16m2_b8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_f16m2_b8 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vmfle_vv_f16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vf_f16m2_b8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_f16m2_b8 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vmfle_vf_f16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vv_f16m4_b4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_f16m4_b4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vmfle_vv_f16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vf_f16m4_b4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_f16m4_b4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vmfle_vf_f16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vv_f16m8_b2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_f16m8_b2 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vmfle_vv_f16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vf_f16m8_b2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_f16m8_b2 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vmfle_vf_f16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vv_f16mf4_b64_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vmfle_vv_f16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vf_f16mf4_b64_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16.f16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vmfle_vf_f16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vv_f16mf2_b32_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vmfle_vv_f16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vf_f16mf2_b32_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16.f16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vmfle_vf_f16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vv_f16m1_b16_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vmfle_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vf_f16m1_b16_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16.f16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vmfle_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vv_f16m2_b8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vmfle_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vf_f16m2_b8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16.f16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vmfle_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vv_f16m4_b4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vmfle_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vf_f16m4_b4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16.f16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vmfle_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vv_f16m8_b2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vmfle_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vf_f16m8_b2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32f16.f16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vmfle_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c
index 6f76063a03cf..bf9158d11857 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -361,3 +362,219 @@ vbool8_t test_vmflt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
                                   vfloat64m8_t op1, double op2, size_t vl) {
   return vmflt_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmflt_vv_f16mf4_b64(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_f16mf4_b64 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vmflt_vv_f16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vf_f16mf4_b64(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_f16mf4_b64 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vmflt_vf_f16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vv_f16mf2_b32(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_f16mf2_b32 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vmflt_vv_f16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vf_f16mf2_b32(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_f16mf2_b32 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vmflt_vf_f16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vv_f16m1_b16(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_f16m1_b16 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vmflt_vv_f16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vf_f16m1_b16(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_f16m1_b16 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vmflt_vf_f16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vv_f16m2_b8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_f16m2_b8 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vmflt_vv_f16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vf_f16m2_b8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_f16m2_b8 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vmflt_vf_f16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vv_f16m4_b4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_f16m4_b4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vmflt_vv_f16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vf_f16m4_b4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_f16m4_b4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vmflt_vf_f16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vv_f16m8_b2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_f16m8_b2 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vmflt_vv_f16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vf_f16m8_b2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_f16m8_b2 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vmflt_vf_f16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vv_f16mf4_b64_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vmflt_vv_f16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vf_f16mf4_b64_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16.f16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vmflt_vf_f16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vv_f16mf2_b32_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vmflt_vv_f16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vf_f16mf2_b32_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16.f16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vmflt_vf_f16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vv_f16m1_b16_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vmflt_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vf_f16m1_b16_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16.f16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vmflt_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vv_f16m2_b8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vmflt_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vf_f16m2_b8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16.f16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vmflt_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vv_f16m4_b4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vmflt_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vf_f16m4_b4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16.f16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vmflt_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vv_f16m8_b2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vmflt_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vf_f16m8_b2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32f16.f16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vmflt_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c
index ec2468e3ea66..a59d5c24a0f0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -361,3 +362,219 @@ vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
                                   vfloat64m8_t op1, double op2, size_t vl) {
   return vmfne_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmfne_vv_f16mf4_b64(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_f16mf4_b64 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vmfne_vv_f16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vf_f16mf4_b64(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_f16mf4_b64 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vmfne_vf_f16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vv_f16mf2_b32(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_f16mf2_b32 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vmfne_vv_f16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vf_f16mf2_b32(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_f16mf2_b32 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vmfne_vf_f16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vv_f16m1_b16(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_f16m1_b16 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vmfne_vv_f16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vf_f16m1_b16(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_f16m1_b16 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vmfne_vf_f16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vv_f16m2_b8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_f16m2_b8 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vmfne_vv_f16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vf_f16m2_b8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_f16m2_b8 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vmfne_vf_f16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vv_f16m4_b4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_f16m4_b4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vmfne_vv_f16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vf_f16m4_b4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_f16m4_b4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vmfne_vf_f16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vv_f16m8_b2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_f16m8_b2 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vmfne_vv_f16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vf_f16m8_b2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_f16m8_b2 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vmfne_vf_f16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vv_f16mf4_b64_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vmfne_vv_f16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vf_f16mf4_b64_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16.f16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vmfne_vf_f16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vv_f16mf2_b32_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vmfne_vv_f16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vf_f16mf2_b32_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16.f16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vmfne_vf_f16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vv_f16m1_b16_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vmfne_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vf_f16m1_b16_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16.f16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vmfne_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vv_f16m2_b8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vmfne_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vf_f16m2_b8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16.f16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vmfne_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vv_f16m4_b4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vmfne_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vf_f16m4_b4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16.f16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vmfne_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vv_f16m8_b2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vmfne_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfne_vf_f16m8_b2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32f16.f16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vmfne_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c
index de0ea1494012..f0997d9d0655 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -1641,3 +1642,57 @@ uint64_t test_vmv_x_s_u64m8_u64(vuint64m8_t src) {
 vuint64m8_t test_vmv_s_x_u64m8(vuint64m8_t dst, uint64_t src, size_t vl) {
   return vmv_s_x_u64m8(dst, src, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmv_v_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vmv.v.v.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vmv_v_v_f16mf4 (vfloat16mf4_t src, size_t vl) {
+  return vmv_v_v_f16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_v_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vmv.v.v.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vmv_v_v_f16mf2 (vfloat16mf2_t src, size_t vl) {
+  return vmv_v_v_f16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_v_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vmv.v.v.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vmv_v_v_f16m1 (vfloat16m1_t src, size_t vl) {
+  return vmv_v_v_f16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_v_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vmv.v.v.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vmv_v_v_f16m2 (vfloat16m2_t src, size_t vl) {
+  return vmv_v_v_f16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_v_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vmv.v.v.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vmv_v_v_f16m4 (vfloat16m4_t src, size_t vl) {
+  return vmv_v_v_f16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_v_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vmv.v.v.nxv32f16.i64(<vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vmv_v_v_f16m8 (vfloat16m8_t src, size_t vl) {
+  return vmv_v_v_f16m8(src, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vreinterpret.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vreinterpret.c
index 1e5846a7b61d..272b4bd1c4e3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vreinterpret.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vreinterpret.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -1688,3 +1689,219 @@ vuint32m4_t test_vreinterpret_v_u64m4_u32m4(vuint64m4_t src) {
 vuint32m8_t test_vreinterpret_v_u64m8_u32m8(vuint64m8_t src) {
   return vreinterpret_v_u64m8_u32m8(src);
 }
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f16mf4_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x half> [[SRC:%.*]] to <vscale x 1 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vreinterpret_v_f16mf4_i16mf4 (vfloat16mf4_t src) {
+  return vreinterpret_v_f16mf4_i16mf4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f16mf2_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x half> [[SRC:%.*]] to <vscale x 2 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vreinterpret_v_f16mf2_i16mf2 (vfloat16mf2_t src) {
+  return vreinterpret_v_f16mf2_i16mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m1_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x half> [[SRC:%.*]] to <vscale x 4 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vreinterpret_v_f16m1_i16m1 (vfloat16m1_t src) {
+  return vreinterpret_v_f16m1_i16m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m2_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x half> [[SRC:%.*]] to <vscale x 8 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vreinterpret_v_f16m2_i16m2 (vfloat16m2_t src) {
+  return vreinterpret_v_f16m2_i16m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m4_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x half> [[SRC:%.*]] to <vscale x 16 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vreinterpret_v_f16m4_i16m4 (vfloat16m4_t src) {
+  return vreinterpret_v_f16m4_i16m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m8_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x half> [[SRC:%.*]] to <vscale x 32 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vreinterpret_v_f16m8_i16m8 (vfloat16m8_t src) {
+  return vreinterpret_v_f16m8_i16m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f16mf4_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x half> [[SRC:%.*]] to <vscale x 1 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vreinterpret_v_f16mf4_u16mf4 (vfloat16mf4_t src) {
+  return vreinterpret_v_f16mf4_u16mf4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f16mf2_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x half> [[SRC:%.*]] to <vscale x 2 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vreinterpret_v_f16mf2_u16mf2 (vfloat16mf2_t src) {
+  return vreinterpret_v_f16mf2_u16mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m1_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x half> [[SRC:%.*]] to <vscale x 4 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vreinterpret_v_f16m1_u16m1 (vfloat16m1_t src) {
+  return vreinterpret_v_f16m1_u16m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m2_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x half> [[SRC:%.*]] to <vscale x 8 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vreinterpret_v_f16m2_u16m2 (vfloat16m2_t src) {
+  return vreinterpret_v_f16m2_u16m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m4_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x half> [[SRC:%.*]] to <vscale x 16 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vreinterpret_v_f16m4_u16m4 (vfloat16m4_t src) {
+  return vreinterpret_v_f16m4_u16m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m8_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x half> [[SRC:%.*]] to <vscale x 32 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vreinterpret_v_f16m8_u16m8 (vfloat16m8_t src) {
+  return vreinterpret_v_f16m8_u16m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf4_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i16> [[SRC:%.*]] to <vscale x 1 x half>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vreinterpret_v_i16mf4_f16mf4 (vint16mf4_t src) {
+  return vreinterpret_v_i16mf4_f16mf4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i16> [[SRC:%.*]] to <vscale x 2 x half>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vreinterpret_v_i16mf2_f16mf2 (vint16mf2_t src) {
+  return vreinterpret_v_i16mf2_f16mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 4 x half>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vreinterpret_v_i16m1_f16m1 (vint16m1_t src) {
+  return vreinterpret_v_i16m1_f16m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 8 x half>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vreinterpret_v_i16m2_f16m2 (vint16m2_t src) {
+  return vreinterpret_v_i16m2_f16m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 16 x half>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vreinterpret_v_i16m4_f16m4 (vint16m4_t src) {
+  return vreinterpret_v_i16m4_f16m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 32 x half>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vreinterpret_v_i16m8_f16m8 (vint16m8_t src) {
+  return vreinterpret_v_i16m8_f16m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf4_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i16> [[SRC:%.*]] to <vscale x 1 x half>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vreinterpret_v_u16mf4_f16mf4 (vuint16mf4_t src) {
+  return vreinterpret_v_u16mf4_f16mf4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i16> [[SRC:%.*]] to <vscale x 2 x half>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vreinterpret_v_u16mf2_f16mf2 (vuint16mf2_t src) {
+  return vreinterpret_v_u16mf2_f16mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 4 x half>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vreinterpret_v_u16m1_f16m1 (vuint16m1_t src) {
+  return vreinterpret_v_u16m1_f16m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 8 x half>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vreinterpret_v_u16m2_f16m2 (vuint16m2_t src) {
+  return vreinterpret_v_u16m2_f16m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 16 x half>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vreinterpret_v_u16m4_f16m4 (vuint16m4_t src) {
+  return vreinterpret_v_u16m4_f16m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 32 x half>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vreinterpret_v_u16m8_f16m8 (vuint16m8_t src) {
+  return vreinterpret_v_u16m8_f16m8(src);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c
index b42c54138cf3..9b2b01b62bf2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -3208,3 +3209,327 @@ vfloat64m8_t test_vrgatherei16_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                           size_t vl) {
   return vrgatherei16_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vrgather_vv_f16mf4 (vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
+  return vrgather_vv_f16mf4(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vrgather_vx_f16mf4 (vfloat16mf4_t op1, size_t index, size_t vl) {
+  return vrgather_vx_f16mf4(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vrgather_vv_f16mf2 (vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
+  return vrgather_vv_f16mf2(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vrgather_vx_f16mf2 (vfloat16mf2_t op1, size_t index, size_t vl) {
+  return vrgather_vx_f16mf2(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vrgather_vv_f16m1 (vfloat16m1_t op1, vuint16m1_t index, size_t vl) {
+  return vrgather_vv_f16m1(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vx_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vrgather_vx_f16m1 (vfloat16m1_t op1, size_t index, size_t vl) {
+  return vrgather_vx_f16m1(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vrgather_vv_f16m2 (vfloat16m2_t op1, vuint16m2_t index, size_t vl) {
+  return vrgather_vv_f16m2(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vx_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vrgather_vx_f16m2 (vfloat16m2_t op1, size_t index, size_t vl) {
+  return vrgather_vx_f16m2(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vrgather_vv_f16m4 (vfloat16m4_t op1, vuint16m4_t index, size_t vl) {
+  return vrgather_vv_f16m4(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vx_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vrgather_vx_f16m4 (vfloat16m4_t op1, size_t index, size_t vl) {
+  return vrgather_vx_f16m4(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vrgather_vv_f16m8 (vfloat16m8_t op1, vuint16m8_t index, size_t vl) {
+  return vrgather_vv_f16m8(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vx_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vrgather_vx_f16m8 (vfloat16m8_t op1, size_t index, size_t vl) {
+  return vrgather_vx_f16m8(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vrgatherei16.vv.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vrgatherei16_vv_f16mf4 (vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vrgatherei16_vv_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vrgatherei16.vv.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vrgatherei16_vv_f16mf2 (vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vrgatherei16_vv_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vrgatherei16.vv.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vrgatherei16_vv_f16m1 (vfloat16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vrgatherei16_vv_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vrgatherei16.vv.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vrgatherei16_vv_f16m2 (vfloat16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vrgatherei16_vv_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vrgatherei16.vv.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vrgatherei16_vv_f16m4 (vfloat16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vrgatherei16_vv_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vrgatherei16.vv.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vrgatherei16_vv_f16m8 (vfloat16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vrgatherei16_vv_f16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vrgather_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
+  return vrgather_vv_f16mf4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vrgather_vx_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t index, size_t vl) {
+  return vrgather_vx_f16mf4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vrgather_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
+  return vrgather_vv_f16mf2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vrgather_vx_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t index, size_t vl) {
+  return vrgather_vx_f16mf2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vv_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vrgather_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t index, size_t vl) {
+  return vrgather_vv_f16m1_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vx_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vrgather_vx_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t index, size_t vl) {
+  return vrgather_vx_f16m1_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vv_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vrgather_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t index, size_t vl) {
+  return vrgather_vv_f16m2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vx_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vrgather_vx_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t index, size_t vl) {
+  return vrgather_vx_f16m2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vv_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vrgather_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t index, size_t vl) {
+  return vrgather_vv_f16m4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vx_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vrgather_vx_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t index, size_t vl) {
+  return vrgather_vx_f16m4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vv_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vrgather_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t index, size_t vl) {
+  return vrgather_vv_f16m8_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vx_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vrgather_vx_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t index, size_t vl) {
+  return vrgather_vx_f16m8_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vrgatherei16_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vrgatherei16_vv_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vrgatherei16_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vrgatherei16_vv_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vrgatherei16_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vrgatherei16_vv_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vrgatherei16_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vrgatherei16_vv_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vrgatherei16_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vrgatherei16_vv_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vrgatherei16_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vrgatherei16_vv_f16m8_m(mask, maskedoff, op1, op2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c
index e2192eab3a53..4c9713d025cd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -1109,3 +1110,111 @@ vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dst,
                                         size_t vl) {
   return vslidedown_vx_f64m8_m(mask, dst, src, offset, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vslidedown_vx_f16mf4 (vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16mf4(dest, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vslidedown_vx_f16mf2 (vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16mf2(dest, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vslidedown_vx_f16m1 (vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m1(dest, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vslidedown_vx_f16m2 (vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m2(dest, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vslidedown_vx_f16m4 (vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m4(dest, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vslidedown_vx_f16m8 (vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m8(dest, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vslidedown_vx_f16mf4_m (vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16mf4_m(mask, dest, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vslidedown_vx_f16mf2_m (vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16mf2_m(mask, dest, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vslidedown_vx_f16m1_m (vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m1_m(mask, dest, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vslidedown_vx_f16m2_m (vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m2_m(mask, dest, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vslidedown_vx_f16m4_m (vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m4_m(mask, dest, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.mask.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vslidedown_vx_f16m8_m (vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m8_m(mask, dest, src, offset, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c
index e9757b67a3b4..2bcea737fe2f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -1094,3 +1095,111 @@ vfloat64m8_t test_vslideup_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dst,
                                       size_t vl) {
   return vslideup_vx_f64m8_m(mask, dst, src, offset, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vslideup_vx_f16mf4 (vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
+  return vslideup_vx_f16mf4(dest, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vslideup_vx_f16mf2 (vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
+  return vslideup_vx_f16mf2(dest, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslideup_vx_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vslideup_vx_f16m1 (vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
+  return vslideup_vx_f16m1(dest, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslideup_vx_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vslideup_vx_f16m2 (vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
+  return vslideup_vx_f16m2(dest, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslideup_vx_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vslideup_vx_f16m4 (vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
+  return vslideup_vx_f16m4(dest, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslideup_vx_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslideup.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vslideup_vx_f16m8 (vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
+  return vslideup_vx_f16m8(dest, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vslideup_vx_f16mf4_m (vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
+  return vslideup_vx_f16mf4_m(mask, dest, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vslideup_vx_f16mf2_m (vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
+  return vslideup_vx_f16mf2_m(mask, dest, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslideup_vx_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vslideup_vx_f16m1_m (vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
+  return vslideup_vx_f16m1_m(mask, dest, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslideup_vx_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vslideup_vx_f16m2_m (vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
+  return vslideup_vx_f16m2_m(mask, dest, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslideup_vx_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vslideup_vx_f16m4_m (vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
+  return vslideup_vx_f16m4_m(mask, dest, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslideup_vx_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslideup.mask.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vslideup_vx_f16m8_m (vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
+  return vslideup_vx_f16m8_m(mask, dest, src, offset, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxei.c
index 46e8b231bfab..5b942530b15d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxei.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -4224,3 +4225,423 @@ void test_vsoxei64_v_f64m8_m(vbool8_t mask, double *base, vuint64m8_t bindex,
                              vfloat64m8_t value, size_t vl) {
   return vsoxei64_v_f64m8_m(mask, base, bindex, value, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsoxei8_v_f16mf4 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsoxei8_v_f16mf2 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsoxei8_v_f16m1 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsoxei8_v_f16m2 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei8_v_f16m4 (_Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) {
+  return vsoxei8_v_f16m4 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv32f16.nxv32i8.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei8_v_f16m8 (_Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) {
+  return vsoxei8_v_f16m8 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsoxei16_v_f16mf4 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsoxei16_v_f16mf2 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsoxei16_v_f16m1 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsoxei16_v_f16m2 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv16f16.nxv16i16.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei16_v_f16m4 (_Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) {
+  return vsoxei16_v_f16m4 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv32f16.nxv32i16.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei16_v_f16m8 (_Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) {
+  return vsoxei16_v_f16m8 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsoxei32_v_f16mf4 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsoxei32_v_f16mf2 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsoxei32_v_f16m1 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsoxei32_v_f16m2 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv16f16.nxv16i32.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei32_v_f16m4 (_Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) {
+  return vsoxei32_v_f16m4 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsoxei64_v_f16mf4 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsoxei64_v_f16mf2 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsoxei64_v_f16m1 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsoxei64_v_f16m2 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsoxei8_v_f16mf4_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsoxei8_v_f16mf2_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsoxei8_v_f16m1_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsoxei8_v_f16m2_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei8_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) {
+  return vsoxei8_v_f16m4_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei8_v_f16m8_m (vbool2_t mask, _Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) {
+  return vsoxei8_v_f16m8_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsoxei16_v_f16mf4_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsoxei16_v_f16mf2_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsoxei16_v_f16m1_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsoxei16_v_f16m2_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei16_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) {
+  return vsoxei16_v_f16m4_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei16_v_f16m8_m (vbool2_t mask, _Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) {
+  return vsoxei16_v_f16m8_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsoxei32_v_f16mf4_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsoxei32_v_f16mf2_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsoxei32_v_f16m1_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsoxei32_v_f16m2_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei32_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) {
+  return vsoxei32_v_f16m4_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsoxei64_v_f16mf4_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsoxei64_v_f16mf2_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsoxei64_v_f16m1_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsoxei64_v_f16m2_m (mask, base, bindex, value, vl);
+}

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxseg.c
index a55904327e3e..c164b89f534f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxseg.c
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
-// RUN:   -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -target-feature +experimental-v \
 // RUN:   -target-feature +experimental-zvlsseg -disable-O0-optnone -emit-llvm %s \
 // RUN:   -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
@@ -14263,3 +14263,1784 @@ void test_vsoxseg2ei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bind
   return vsoxseg2ei64_v_f64m4_m(mask, base, bindex, v0, v1, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+  return vsoxseg2ei8_v_f16mf4(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+  return vsoxseg3ei8_v_f16mf4(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+  return vsoxseg4ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg5ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+  return vsoxseg5ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg6ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vsoxseg6ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg7ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vsoxseg7ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg8ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vsoxseg8ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vsoxseg2ei8_v_f16mf2(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vsoxseg3ei8_v_f16mf2(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+  return vsoxseg4ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg5ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+  return vsoxseg5ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg6ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+  return vsoxseg6ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg7ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+  return vsoxseg7ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg8ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+  return vsoxseg8ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+  return vsoxseg2ei8_v_f16m1(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+  return vsoxseg3ei8_v_f16m1(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+  return vsoxseg4ei8_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg5ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+  return vsoxseg5ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg6ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+  return vsoxseg6ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg7ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+  return vsoxseg7ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], <vscale x 4 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg8ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+  return vsoxseg8ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+  return vsoxseg2ei8_v_f16m2(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+  return vsoxseg3ei8_v_f16m2(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], <vscale x 8 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+  return vsoxseg4ei8_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[V0:%.*]], <vscale x 16 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei8_v_f16m4 (_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+  return vsoxseg2ei8_v_f16m4(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+  return vsoxseg2ei16_v_f16mf4(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+  return vsoxseg3ei16_v_f16mf4(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+  return vsoxseg4ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg5ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+  return vsoxseg5ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg6ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vsoxseg6ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg7ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vsoxseg7ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg8ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vsoxseg8ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vsoxseg2ei16_v_f16mf2(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vsoxseg3ei16_v_f16mf2(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+  return vsoxseg4ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg5ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+  return vsoxseg5ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg6ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+  return vsoxseg6ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg7ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+  return vsoxseg7ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg8ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+  return vsoxseg8ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+  return vsoxseg2ei16_v_f16m1(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+  return vsoxseg3ei16_v_f16m1(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+  return vsoxseg4ei16_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg5ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+  return vsoxseg5ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg6ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+  return vsoxseg6ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg7ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+  return vsoxseg7ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], <vscale x 4 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg8ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+  return vsoxseg8ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+  return vsoxseg2ei16_v_f16m2(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+  return vsoxseg3ei16_v_f16m2(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], <vscale x 8 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+  return vsoxseg4ei16_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i16.i64(<vscale x 16 x half> [[V0:%.*]], <vscale x 16 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei16_v_f16m4 (_Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+  return vsoxseg2ei16_v_f16m4(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+  return vsoxseg2ei32_v_f16mf4(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+  return vsoxseg3ei32_v_f16mf4(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+  return vsoxseg4ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg5ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+  return vsoxseg5ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg6ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vsoxseg6ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg7ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vsoxseg7ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg8ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vsoxseg8ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vsoxseg2ei32_v_f16mf2(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vsoxseg3ei32_v_f16mf2(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+  return vsoxseg4ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg5ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+  return vsoxseg5ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg6ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+  return vsoxseg6ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg7ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+  return vsoxseg7ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg8ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+  return vsoxseg8ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+  return vsoxseg2ei32_v_f16m1(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+  return vsoxseg3ei32_v_f16m1(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+  return vsoxseg4ei32_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg5ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+  return vsoxseg5ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg6ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+  return vsoxseg6ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg7ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+  return vsoxseg7ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], <vscale x 4 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg8ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+  return vsoxseg8ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+  return vsoxseg2ei32_v_f16m2(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+  return vsoxseg3ei32_v_f16m2(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], <vscale x 8 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+  return vsoxseg4ei32_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i32.i64(<vscale x 16 x half> [[V0:%.*]], <vscale x 16 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei32_v_f16m4 (_Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+  return vsoxseg2ei32_v_f16m4(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+  return vsoxseg2ei64_v_f16mf4(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+  return vsoxseg3ei64_v_f16mf4(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+  return vsoxseg4ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg5ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+  return vsoxseg5ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg6ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vsoxseg6ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg7ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vsoxseg7ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg8ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vsoxseg8ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vsoxseg2ei64_v_f16mf2(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vsoxseg3ei64_v_f16mf2(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+  return vsoxseg4ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg5ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+  return vsoxseg5ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg6ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+  return vsoxseg6ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg7ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+  return vsoxseg7ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg8ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+  return vsoxseg8ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+  return vsoxseg2ei64_v_f16m1(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+  return vsoxseg3ei64_v_f16m1(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+  return vsoxseg4ei64_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg5ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+  return vsoxseg5ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg6ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+  return vsoxseg6ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg7ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+  return vsoxseg7ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], <vscale x 4 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg8ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+  return vsoxseg8ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+  return vsoxseg2ei64_v_f16m2(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+  return vsoxseg3ei64_v_f16m2(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], <vscale x 8 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+  return vsoxseg4ei64_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+  return vsoxseg2ei8_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+  return vsoxseg3ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+  return vsoxseg4ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg5ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+  return vsoxseg5ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg6ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vsoxseg6ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg7ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vsoxseg7ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg8ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vsoxseg8ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vsoxseg2ei8_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vsoxseg3ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+  return vsoxseg4ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg5ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+  return vsoxseg5ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg6ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+  return vsoxseg6ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg7ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+  return vsoxseg7ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg8ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+  return vsoxseg8ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+  return vsoxseg2ei8_v_f16m1_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+  return vsoxseg3ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+  return vsoxseg4ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg5ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+  return vsoxseg5ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg6ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+  return vsoxseg6ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg7ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+  return vsoxseg7ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], <vscale x 4 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg8ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+  return vsoxseg8ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+  return vsoxseg2ei8_v_f16m2_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+  return vsoxseg3ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], <vscale x 8 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+  return vsoxseg4ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[V0:%.*]], <vscale x 16 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei8_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+  return vsoxseg2ei8_v_f16m4_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+  return vsoxseg2ei16_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+  return vsoxseg3ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+  return vsoxseg4ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg5ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+  return vsoxseg5ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg6ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vsoxseg6ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg7ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vsoxseg7ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg8ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vsoxseg8ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vsoxseg2ei16_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vsoxseg3ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+  return vsoxseg4ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg5ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+  return vsoxseg5ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg6ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+  return vsoxseg6ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg7ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+  return vsoxseg7ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg8ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+  return vsoxseg8ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+  return vsoxseg2ei16_v_f16m1_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+  return vsoxseg3ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+  return vsoxseg4ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg5ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+  return vsoxseg5ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg6ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+  return vsoxseg6ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg7ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+  return vsoxseg7ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], <vscale x 4 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg8ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+  return vsoxseg8ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+  return vsoxseg2ei16_v_f16m2_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+  return vsoxseg3ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], <vscale x 8 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+  return vsoxseg4ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i16.i64(<vscale x 16 x half> [[V0:%.*]], <vscale x 16 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei16_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+  return vsoxseg2ei16_v_f16m4_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+  return vsoxseg2ei32_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+  return vsoxseg3ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+  return vsoxseg4ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg5ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+  return vsoxseg5ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg6ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vsoxseg6ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg7ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vsoxseg7ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg8ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vsoxseg8ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vsoxseg2ei32_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vsoxseg3ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+  return vsoxseg4ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg5ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+  return vsoxseg5ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg6ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+  return vsoxseg6ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg7ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+  return vsoxseg7ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg8ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+  return vsoxseg8ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+  return vsoxseg2ei32_v_f16m1_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+  return vsoxseg3ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+  return vsoxseg4ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg5ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+  return vsoxseg5ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg6ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+  return vsoxseg6ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg7ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+  return vsoxseg7ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], <vscale x 4 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg8ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+  return vsoxseg8ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+  return vsoxseg2ei32_v_f16m2_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+  return vsoxseg3ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], <vscale x 8 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+  return vsoxseg4ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i32.i64(<vscale x 16 x half> [[V0:%.*]], <vscale x 16 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei32_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+  return vsoxseg2ei32_v_f16m4_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+  return vsoxseg2ei64_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+  return vsoxseg3ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+  return vsoxseg4ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg5ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+  return vsoxseg5ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg6ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vsoxseg6ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg7ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vsoxseg7ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg8ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vsoxseg8ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vsoxseg2ei64_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vsoxseg3ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+  return vsoxseg4ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg5ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+  return vsoxseg5ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg6ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+  return vsoxseg6ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg7ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+  return vsoxseg7ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg8ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+  return vsoxseg8ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+  return vsoxseg2ei64_v_f16m1_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+  return vsoxseg3ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+  return vsoxseg4ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg5ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+  return vsoxseg5ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg6ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+  return vsoxseg6ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg7ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+  return vsoxseg7ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], <vscale x 4 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg8ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+  return vsoxseg8ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg2ei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+  return vsoxseg2ei64_v_f16m2_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg3ei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+  return vsoxseg3ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], <vscale x 8 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxseg4ei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+  return vsoxseg4ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsse.c
index 921d9e2d0b49..b9d3588fee9d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsse.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsse.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -1170,3 +1171,123 @@ void test_vsse64_v_f64m8_m(vbool8_t mask, double *base, ptrdiff_t bstride,
                            vfloat64m8_t value, size_t vl) {
   return vsse64_v_f64m8_m(mask, base, bstride, value, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsse.nxv1f16.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsse16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl) {
+  return vsse16_v_f16mf4 (base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsse.nxv2f16.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsse16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl) {
+  return vsse16_v_f16mf2 (base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsse.nxv4f16.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsse16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl) {
+  return vsse16_v_f16m1 (base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsse.nxv8f16.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsse16_v_f16m2 (_Float16 *base, ptrdiff_t bstride, vfloat16m2_t value, size_t vl) {
+  return vsse16_v_f16m2 (base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsse.nxv16f16.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsse16_v_f16m4 (_Float16 *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl) {
+  return vsse16_v_f16m4 (base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsse.nxv32f16.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsse16_v_f16m8 (_Float16 *base, ptrdiff_t bstride, vfloat16m8_t value, size_t vl) {
+  return vsse16_v_f16m8 (base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsse.mask.nxv1f16.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsse16_v_f16mf4_m (vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl) {
+  return vsse16_v_f16mf4_m (mask, base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsse.mask.nxv2f16.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsse16_v_f16mf2_m (vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl) {
+  return vsse16_v_f16mf2_m (mask, base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsse.mask.nxv4f16.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsse16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl) {
+  return vsse16_v_f16m1_m (mask, base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsse.mask.nxv8f16.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsse16_v_f16m2_m (vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t value, size_t vl) {
+  return vsse16_v_f16m2_m (mask, base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsse.mask.nxv16f16.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsse16_v_f16m4_m (vbool4_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl) {
+  return vsse16_v_f16m4_m (mask, base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsse.mask.nxv32f16.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsse16_v_f16m8_m (vbool2_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m8_t value, size_t vl) {
+  return vsse16_v_f16m8_m (mask, base, bstride, value, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsseg.c
index e71376c88670..23f08dd4827a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsseg.c
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
-// RUN:   -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -target-feature +experimental-v \
 // RUN:   -target-feature +experimental-zvlsseg -disable-O0-optnone -emit-llvm %s \
 // RUN:   -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
@@ -3625,3 +3625,452 @@ void test_vsseg2e64_v_f64m4_m (vbool16_t mask, double *base, vfloat64m4_t v0, vf
   return vsseg2e64_v_f64m4_m(mask, base, v0, v1, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg2e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+  return vsseg2e16_v_f16mf4(base, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg3e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+  return vsseg3e16_v_f16mf4(base, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg4e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+  return vsseg4e16_v_f16mf4(base, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg5e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+  return vsseg5e16_v_f16mf4(base, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg6e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vsseg6e16_v_f16mf4(base, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg7e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vsseg7e16_v_f16mf4(base, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg8e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vsseg8e16_v_f16mf4(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg2e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vsseg2e16_v_f16mf2(base, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg3e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vsseg3e16_v_f16mf2(base, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg4e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+  return vsseg4e16_v_f16mf2(base, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg5e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+  return vsseg5e16_v_f16mf2(base, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg6e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+  return vsseg6e16_v_f16mf2(base, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg7e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+  return vsseg7e16_v_f16mf2(base, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg8e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+  return vsseg8e16_v_f16mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg2e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+  return vsseg2e16_v_f16m1(base, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg3e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+  return vsseg3e16_v_f16m1(base, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg4e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+  return vsseg4e16_v_f16m1(base, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg5e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+  return vsseg5e16_v_f16m1(base, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg6e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+  return vsseg6e16_v_f16m1(base, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg7e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+  return vsseg7e16_v_f16m1(base, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], <vscale x 4 x half> [[V7:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg8e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+  return vsseg8e16_v_f16m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv8f16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg2e16_v_f16m2 (_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+  return vsseg2e16_v_f16m2(base, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv8f16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg3e16_v_f16m2 (_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+  return vsseg3e16_v_f16m2(base, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv8f16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], <vscale x 8 x half> [[V3:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg4e16_v_f16m2 (_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+  return vsseg4e16_v_f16m2(base, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv16f16.i64(<vscale x 16 x half> [[V0:%.*]], <vscale x 16 x half> [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg2e16_v_f16m4 (_Float16 *base, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+  return vsseg2e16_v_f16m4(base, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg2e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+  return vsseg2e16_v_f16mf4_m(mask, base, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg3e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+  return vsseg3e16_v_f16mf4_m(mask, base, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg4e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+  return vsseg4e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg5e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+  return vsseg5e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg6e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vsseg6e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg7e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vsseg7e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg8e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vsseg8e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg2e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vsseg2e16_v_f16mf2_m(mask, base, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg3e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vsseg3e16_v_f16mf2_m(mask, base, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg4e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+  return vsseg4e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg5e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+  return vsseg5e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg6e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+  return vsseg6e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg7e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+  return vsseg7e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg8e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+  return vsseg8e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg2e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+  return vsseg2e16_v_f16m1_m(mask, base, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg3e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+  return vsseg3e16_v_f16m1_m(mask, base, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg4e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+  return vsseg4e16_v_f16m1_m(mask, base, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg5e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+  return vsseg5e16_v_f16m1_m(mask, base, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg6e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+  return vsseg6e16_v_f16m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg7e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+  return vsseg7e16_v_f16m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], <vscale x 4 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg8e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+  return vsseg8e16_v_f16m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv8f16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg2e16_v_f16m2_m (vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+  return vsseg2e16_v_f16m2_m(mask, base, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv8f16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg3e16_v_f16m2_m (vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+  return vsseg3e16_v_f16m2_m(mask, base, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv8f16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], <vscale x 8 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg4e16_v_f16m2_m (vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+  return vsseg4e16_v_f16m2_m(mask, base, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv16f16.i64(<vscale x 16 x half> [[V0:%.*]], <vscale x 16 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsseg2e16_v_f16m4_m (vbool4_t mask, _Float16 *base, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+  return vsseg2e16_v_f16m4_m(mask, base, v0, v1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssseg.c
index c85fa8893771..5e8dd8bcfc1a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssseg.c
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
-// RUN:   -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -target-feature +experimental-v \
 // RUN:   -target-feature +experimental-zvlsseg -disable-O0-optnone -emit-llvm %s \
 // RUN:   -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
@@ -3625,3 +3625,452 @@ void test_vssseg2e64_v_f64m4_m (vbool16_t mask, double *base, ptrdiff_t bstride,
   return vssseg2e64_v_f64m4_m(mask, base, bstride, v0, v1, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg2.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg2e16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+  return vssseg2e16_v_f16mf4(base, bstride, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg3.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg3e16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+  return vssseg3e16_v_f16mf4(base, bstride, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg4e16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+  return vssseg4e16_v_f16mf4(base, bstride, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg5.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg5e16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+  return vssseg5e16_v_f16mf4(base, bstride, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg6.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg6e16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vssseg6e16_v_f16mf4(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg7.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg7e16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vssseg7e16_v_f16mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg8.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg8e16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vssseg8e16_v_f16mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg2.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg2e16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vssseg2e16_v_f16mf2(base, bstride, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg3.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg3e16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vssseg3e16_v_f16mf2(base, bstride, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg4e16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+  return vssseg4e16_v_f16mf2(base, bstride, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg5.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg5e16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+  return vssseg5e16_v_f16mf2(base, bstride, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg6.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg6e16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+  return vssseg6e16_v_f16mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg7.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg7e16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+  return vssseg7e16_v_f16mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg8.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg8e16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+  return vssseg8e16_v_f16mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg2.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg2e16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+  return vssseg2e16_v_f16m1(base, bstride, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg3.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg3e16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+  return vssseg3e16_v_f16m1(base, bstride, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg4e16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+  return vssseg4e16_v_f16m1(base, bstride, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg5.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg5e16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+  return vssseg5e16_v_f16m1(base, bstride, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg6.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg6e16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+  return vssseg6e16_v_f16m1(base, bstride, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg7.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg7e16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+  return vssseg7e16_v_f16m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg8.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], <vscale x 4 x half> [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg8e16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+  return vssseg8e16_v_f16m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg2.nxv8f16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg2e16_v_f16m2 (_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+  return vssseg2e16_v_f16m2(base, bstride, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg3.nxv8f16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg3e16_v_f16m2 (_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+  return vssseg3e16_v_f16m2(base, bstride, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.nxv8f16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], <vscale x 8 x half> [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg4e16_v_f16m2 (_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+  return vssseg4e16_v_f16m2(base, bstride, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg2.nxv16f16.i64(<vscale x 16 x half> [[V0:%.*]], <vscale x 16 x half> [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg2e16_v_f16m4 (_Float16 *base, ptrdiff_t bstride, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+  return vssseg2e16_v_f16m4(base, bstride, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg2.mask.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg2e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+  return vssseg2e16_v_f16mf4_m(mask, base, bstride, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg3.mask.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg3e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+  return vssseg3e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.mask.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg4e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+  return vssseg4e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg5.mask.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg5e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+  return vssseg5e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg6.mask.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg6e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vssseg6e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg7.mask.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg7e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vssseg7e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg8.mask.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg8e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vssseg8e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg2.mask.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg2e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vssseg2e16_v_f16mf2_m(mask, base, bstride, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg3.mask.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg3e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vssseg3e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.mask.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg4e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+  return vssseg4e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg5.mask.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg5e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+  return vssseg5e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg6.mask.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg6e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+  return vssseg6e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg7.mask.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg7e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+  return vssseg7e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg8.mask.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg8e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+  return vssseg8e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg2.mask.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg2e16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+  return vssseg2e16_v_f16m1_m(mask, base, bstride, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg3.mask.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg3e16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+  return vssseg3e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.mask.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg4e16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+  return vssseg4e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg5.mask.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg5e16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+  return vssseg5e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg6.mask.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg6e16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+  return vssseg6e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg7.mask.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg7e16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+  return vssseg7e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg8.mask.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], <vscale x 4 x half> [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg8e16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+  return vssseg8e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg2.mask.nxv8f16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg2e16_v_f16m2_m (vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+  return vssseg2e16_v_f16m2_m(mask, base, bstride, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg3.mask.nxv8f16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg3e16_v_f16m2_m (vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+  return vssseg3e16_v_f16m2_m(mask, base, bstride, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg4.mask.nxv8f16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], <vscale x 8 x half> [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg4e16_v_f16m2_m (vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+  return vssseg4e16_v_f16m2_m(mask, base, bstride, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg2.mask.nxv16f16.i64(<vscale x 16 x half> [[V0:%.*]], <vscale x 16 x half> [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vssseg2e16_v_f16m4_m (vbool4_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+  return vssseg2e16_v_f16m4_m(mask, base, bstride, v0, v1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxei.c
index b954ec6f3aab..ae5675fa19b8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxei.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -4224,3 +4225,423 @@ void test_vsuxei64_v_f64m8_m(vbool8_t mask, double *base, vuint64m8_t bindex,
                              vfloat64m8_t value, size_t vl) {
   return vsuxei64_v_f64m8_m(mask, base, bindex, value, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsuxei8_v_f16mf4 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsuxei8_v_f16mf2 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsuxei8_v_f16m1 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsuxei8_v_f16m2 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16m4 (_Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) {
+  return vsuxei8_v_f16m4 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv32f16.nxv32i8.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16m8 (_Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) {
+  return vsuxei8_v_f16m8 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsuxei16_v_f16mf4 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsuxei16_v_f16mf2 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsuxei16_v_f16m1 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsuxei16_v_f16m2 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv16f16.nxv16i16.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16m4 (_Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) {
+  return vsuxei16_v_f16m4 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv32f16.nxv32i16.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16m8 (_Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) {
+  return vsuxei16_v_f16m8 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsuxei32_v_f16mf4 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsuxei32_v_f16mf2 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsuxei32_v_f16m1 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsuxei32_v_f16m2 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv16f16.nxv16i32.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei32_v_f16m4 (_Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) {
+  return vsuxei32_v_f16m4 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsuxei64_v_f16mf4 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsuxei64_v_f16mf2 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsuxei64_v_f16m1 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsuxei64_v_f16m2 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsuxei8_v_f16mf4_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsuxei8_v_f16mf2_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsuxei8_v_f16m1_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsuxei8_v_f16m2_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) {
+  return vsuxei8_v_f16m4_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16m8_m (vbool2_t mask, _Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) {
+  return vsuxei8_v_f16m8_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsuxei16_v_f16mf4_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsuxei16_v_f16mf2_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsuxei16_v_f16m1_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsuxei16_v_f16m2_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) {
+  return vsuxei16_v_f16m4_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16m8_m (vbool2_t mask, _Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) {
+  return vsuxei16_v_f16m8_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsuxei32_v_f16mf4_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsuxei32_v_f16mf2_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsuxei32_v_f16m1_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsuxei32_v_f16m2_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei32_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) {
+  return vsuxei32_v_f16m4_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsuxei64_v_f16mf4_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsuxei64_v_f16mf2_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsuxei64_v_f16m1_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsuxei64_v_f16m2_m (mask, base, bindex, value, vl);
+}

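(Again for context only, not part of the patch: a small sketch of how the unordered indexed stores covered above might be used. It reuses the vsuxei16_v_f16m1 signature tested here and assumes the companion vsetvl/vle16 intrinsics from the same v0.10 intrinsic set; the function and variable names are illustrative.)

#include <stddef.h>
#include <stdint.h>
#include <riscv_vector.h>

// Illustrative only: scatter n half-precision values to byte offsets taken from
// `offsets` (the vsuxei* index operand holds byte offsets and the stores are
// unordered with respect to each other).
static void scatter_f16(_Float16 *base, const uint16_t *offsets,
                        const _Float16 *src, size_t n) {
  for (size_t i = 0; i < n; ) {
    size_t vl = vsetvl_e16m1(n - i);                     // elements handled this iteration
    vuint16m1_t bindex = vle16_v_u16m1(offsets + i, vl); // byte offsets
    vfloat16m1_t value = vle16_v_f16m1(src + i, vl);     // values to store
    vsuxei16_v_f16m1(base, bindex, value, vl);           // unordered indexed store
    i += vl;
  }
}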
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxseg.c
index e66674d59881..55bc69acf753 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxseg.c
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
-// RUN:   -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -target-feature +experimental-v \
 // RUN:   -target-feature +experimental-zvlsseg -disable-O0-optnone -emit-llvm %s \
 // RUN:   -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
@@ -14263,3 +14263,1784 @@ void test_vsuxseg2ei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bind
   return vsuxseg2ei64_v_f64m4_m(mask, base, bindex, v0, v1, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+  return vsuxseg2ei8_v_f16mf4(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+  return vsuxseg3ei8_v_f16mf4(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+  return vsuxseg4ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg5ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+  return vsuxseg5ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg6ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vsuxseg6ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg7ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vsuxseg7ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg8ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vsuxseg8ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vsuxseg2ei8_v_f16mf2(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vsuxseg3ei8_v_f16mf2(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+  return vsuxseg4ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg5ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+  return vsuxseg5ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg6ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+  return vsuxseg6ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg7ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+  return vsuxseg7ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg8ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+  return vsuxseg8ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+  return vsuxseg2ei8_v_f16m1(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+  return vsuxseg3ei8_v_f16m1(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+  return vsuxseg4ei8_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg5ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+  return vsuxseg5ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg6ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+  return vsuxseg6ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg7ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+  return vsuxseg7ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], <vscale x 4 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg8ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+  return vsuxseg8ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+  return vsuxseg2ei8_v_f16m2(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+  return vsuxseg3ei8_v_f16m2(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], <vscale x 8 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+  return vsuxseg4ei8_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[V0:%.*]], <vscale x 16 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei8_v_f16m4 (_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+  return vsuxseg2ei8_v_f16m4(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+  return vsuxseg2ei16_v_f16mf4(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+  return vsuxseg3ei16_v_f16mf4(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+  return vsuxseg4ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg5ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+  return vsuxseg5ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg6ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vsuxseg6ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg7ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vsuxseg7ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg8ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vsuxseg8ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vsuxseg2ei16_v_f16mf2(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vsuxseg3ei16_v_f16mf2(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+  return vsuxseg4ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg5ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+  return vsuxseg5ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg6ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+  return vsuxseg6ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg7ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+  return vsuxseg7ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg8ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+  return vsuxseg8ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+  return vsuxseg2ei16_v_f16m1(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+  return vsuxseg3ei16_v_f16m1(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+  return vsuxseg4ei16_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg5ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+  return vsuxseg5ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg6ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+  return vsuxseg6ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg7ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+  return vsuxseg7ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], <vscale x 4 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg8ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+  return vsuxseg8ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+  return vsuxseg2ei16_v_f16m2(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+  return vsuxseg3ei16_v_f16m2(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], <vscale x 8 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+  return vsuxseg4ei16_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i16.i64(<vscale x 16 x half> [[V0:%.*]], <vscale x 16 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei16_v_f16m4 (_Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+  return vsuxseg2ei16_v_f16m4(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+  return vsuxseg2ei32_v_f16mf4(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+  return vsuxseg3ei32_v_f16mf4(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+  return vsuxseg4ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg5ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+  return vsuxseg5ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg6ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vsuxseg6ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg7ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vsuxseg7ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg8ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vsuxseg8ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vsuxseg2ei32_v_f16mf2(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vsuxseg3ei32_v_f16mf2(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+  return vsuxseg4ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg5ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+  return vsuxseg5ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg6ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+  return vsuxseg6ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg7ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+  return vsuxseg7ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg8ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+  return vsuxseg8ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+  return vsuxseg2ei32_v_f16m1(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+  return vsuxseg3ei32_v_f16m1(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+  return vsuxseg4ei32_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg5ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+  return vsuxseg5ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg6ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+  return vsuxseg6ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg7ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+  return vsuxseg7ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], <vscale x 4 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg8ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+  return vsuxseg8ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+  return vsuxseg2ei32_v_f16m2(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+  return vsuxseg3ei32_v_f16m2(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], <vscale x 8 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+  return vsuxseg4ei32_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i32.i64(<vscale x 16 x half> [[V0:%.*]], <vscale x 16 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei32_v_f16m4 (_Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+  return vsuxseg2ei32_v_f16m4(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+  return vsuxseg2ei64_v_f16mf4(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+  return vsuxseg3ei64_v_f16mf4(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+  return vsuxseg4ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg5ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+  return vsuxseg5ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg6ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vsuxseg6ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg7ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vsuxseg7ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg8ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vsuxseg8ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vsuxseg2ei64_v_f16mf2(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vsuxseg3ei64_v_f16mf2(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+  return vsuxseg4ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg5ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+  return vsuxseg5ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg6ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+  return vsuxseg6ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg7ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+  return vsuxseg7ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg8ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+  return vsuxseg8ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+  return vsuxseg2ei64_v_f16m1(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+  return vsuxseg3ei64_v_f16m1(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+  return vsuxseg4ei64_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg5ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+  return vsuxseg5ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg6ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+  return vsuxseg6ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg7ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+  return vsuxseg7ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], <vscale x 4 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg8ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+  return vsuxseg8ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+  return vsuxseg2ei64_v_f16m2(base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+  return vsuxseg3ei64_v_f16m2(base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], <vscale x 8 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+  return vsuxseg4ei64_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+  return vsuxseg2ei8_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+  return vsuxseg3ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+  return vsuxseg4ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg5ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+  return vsuxseg5ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg6ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vsuxseg6ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg7ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vsuxseg7ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg8ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vsuxseg8ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vsuxseg2ei8_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vsuxseg3ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+  return vsuxseg4ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg5ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+  return vsuxseg5ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg6ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+  return vsuxseg6ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg7ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+  return vsuxseg7ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg8ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+  return vsuxseg8ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+  return vsuxseg2ei8_v_f16m1_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+  return vsuxseg3ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+  return vsuxseg4ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg5ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+  return vsuxseg5ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg6ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+  return vsuxseg6ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg7ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+  return vsuxseg7ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], <vscale x 4 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg8ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+  return vsuxseg8ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+  return vsuxseg2ei8_v_f16m2_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+  return vsuxseg3ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], <vscale x 8 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+  return vsuxseg4ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[V0:%.*]], <vscale x 16 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei8_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+  return vsuxseg2ei8_v_f16m4_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+  return vsuxseg2ei16_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+  return vsuxseg3ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+  return vsuxseg4ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg5ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+  return vsuxseg5ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg6ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vsuxseg6ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg7ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vsuxseg7ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg8ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vsuxseg8ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vsuxseg2ei16_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vsuxseg3ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+  return vsuxseg4ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg5ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+  return vsuxseg5ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg6ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+  return vsuxseg6ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg7ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+  return vsuxseg7ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg8ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+  return vsuxseg8ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+  return vsuxseg2ei16_v_f16m1_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+  return vsuxseg3ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+  return vsuxseg4ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg5ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+  return vsuxseg5ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg6ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+  return vsuxseg6ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg7ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+  return vsuxseg7ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], <vscale x 4 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg8ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+  return vsuxseg8ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+  return vsuxseg2ei16_v_f16m2_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+  return vsuxseg3ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], <vscale x 8 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+  return vsuxseg4ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i16.i64(<vscale x 16 x half> [[V0:%.*]], <vscale x 16 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei16_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+  return vsuxseg2ei16_v_f16m4_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+  return vsuxseg2ei32_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+  return vsuxseg3ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+  return vsuxseg4ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg5ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+  return vsuxseg5ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg6ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vsuxseg6ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg7ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vsuxseg7ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg8ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vsuxseg8ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vsuxseg2ei32_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vsuxseg3ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+  return vsuxseg4ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg5ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+  return vsuxseg5ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg6ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+  return vsuxseg6ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg7ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+  return vsuxseg7ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg8ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+  return vsuxseg8ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+  return vsuxseg2ei32_v_f16m1_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+  return vsuxseg3ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+  return vsuxseg4ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg5ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+  return vsuxseg5ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg6ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+  return vsuxseg6ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg7ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+  return vsuxseg7ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], <vscale x 4 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg8ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+  return vsuxseg8ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+  return vsuxseg2ei32_v_f16m2_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+  return vsuxseg3ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], <vscale x 8 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+  return vsuxseg4ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i32.i64(<vscale x 16 x half> [[V0:%.*]], <vscale x 16 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei32_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+  return vsuxseg2ei32_v_f16m4_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+  return vsuxseg2ei64_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+  return vsuxseg3ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+  return vsuxseg4ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg5ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+  return vsuxseg5ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg6ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vsuxseg6ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg7ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vsuxseg7ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg8ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vsuxseg8ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vsuxseg2ei64_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vsuxseg3ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+  return vsuxseg4ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg5ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+  return vsuxseg5ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg6ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+  return vsuxseg6ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg7ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+  return vsuxseg7ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg8ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+  return vsuxseg8ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+  return vsuxseg2ei64_v_f16m1_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+  return vsuxseg3ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+  return vsuxseg4ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg5ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+  return vsuxseg5ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg6ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+  return vsuxseg6ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg7ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+  return vsuxseg7ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], <vscale x 4 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg8ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+  return vsuxseg8ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg2ei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+  return vsuxseg2ei64_v_f16m2_m(mask, base, bindex, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg3ei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+  return vsuxseg3ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], <vscale x 8 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxseg4ei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+  return vsuxseg4ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+}

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vundefined.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vundefined.c
index 503849942e5e..de840414c541 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vundefined.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vundefined.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
@@ -322,3 +323,51 @@ vfloat64m4_t test_vundefined_f64m4() { return vundefined_f64m4(); }
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> undef
 //
 vfloat64m8_t test_vundefined_f64m8() { return vundefined_f64m8(); }
+
+// CHECK-RV64-LABEL: @test_vundefined_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> undef
+//
+vfloat16mf4_t test_vundefined_f16mf4 () {
+  return vundefined_f16mf4();
+}
+
+// CHECK-RV64-LABEL: @test_vundefined_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> undef
+//
+vfloat16mf2_t test_vundefined_f16mf2 () {
+  return vundefined_f16mf2();
+}
+
+// CHECK-RV64-LABEL: @test_vundefined_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> undef
+//
+vfloat16m1_t test_vundefined_f16m1 () {
+  return vundefined_f16m1();
+}
+
+// CHECK-RV64-LABEL: @test_vundefined_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> undef
+//
+vfloat16m2_t test_vundefined_f16m2 () {
+  return vundefined_f16m2();
+}
+
+// CHECK-RV64-LABEL: @test_vundefined_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> undef
+//
+vfloat16m4_t test_vundefined_f16m4 () {
+  return vundefined_f16m4();
+}
+
+// CHECK-RV64-LABEL: @test_vundefined_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> undef
+//
+vfloat16m8_t test_vundefined_f16m8 () {
+  return vundefined_f16m8();
+}
