[clang] [RISCV] Support floating point VCIX (PR #67094)

Brandon Wu via cfe-commits cfe-commits at lists.llvm.org
Fri Sep 22 00:05:24 PDT 2023


4vtomat (https://github.com/4vtomat) created https://github.com/llvm/llvm-project/pull/67094

None

From c6d4731f303246dda31a2637f1e01bc96de3bd0d Mon Sep 17 00:00:00 2001
From: Brandon Wu <brandon.wu at sifive.com>
Date: Wed, 20 Sep 2023 11:00:52 -0700
Subject: [PATCH] [RISCV] Support floating point VCIX

---
 .../clang/Basic/riscv_sifive_vector.td        |   62 +-
 .../non-policy/non-overloaded/xsfvcp-x-rv64.c |  110 +-
 .../non-policy/non-overloaded/xsfvcp-x.c      | 1096 +++++++-
 .../non-overloaded/xsfvcp-xv-rv64.c           |  108 +
 .../non-policy/non-overloaded/xsfvcp-xv.c     | 2350 +++++++++++++++-
 .../non-overloaded/xsfvcp-xvv-rv64.c          |  110 +-
 .../non-policy/non-overloaded/xsfvcp-xvv.c    | 2356 ++++++++++++++++-
 .../non-policy/non-overloaded/xsfvcp-xvw.c    | 1516 ++++++++++-
 llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td    |   54 +-
 llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll       |  394 ++-
 llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll      | 2188 ++++++++++++++-
 llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll     | 2269 +++++++++++++++-
 llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll     | 1408 +++++++++-
 13 files changed, 13937 insertions(+), 84 deletions(-)
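
For readers skimming the patch: at the source level the change makes the existing sf_vc_* VCIX builtins available with floating-point vector types. A minimal sketch of what that looks like, using the f64 intrinsics exercised in the tests below; the p27_26/p24_20/p11_7 opcode-field constants are macros the tests define, and the values used here are assumptions read off the CHECK lines (i64 3 and i64 31):

#include <sifive_vector.h>

// Assumed opcode-field constants, mirroring the immediates in the CHECK lines.
#define p27_26 (0b11)
#define p24_20 (0b11111)
#define p11_7  (0b11111)

void demo_vc_x(uint64_t rs1, size_t vl) {
  // New: sf.vc.x form keyed by a floating-point element type (f64, LMUL=1).
  __riscv_sf_vc_x_se_f64m1(p27_26, p24_20, p11_7, rs1, vl);
}

vfloat64m1_t demo_vc_v_x(uint64_t rs1, size_t vl) {
  // New: the result is now a floating-point vector rather than vuint64m1_t.
  return __riscv_sf_vc_v_x_se_f64m1(p27_26, p24_20, rs1, vl);
}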

diff --git a/clang/include/clang/Basic/riscv_sifive_vector.td b/clang/include/clang/Basic/riscv_sifive_vector.td
index 6583a7eb7b2e59b..d3b9e79a5b6fa69 100644
--- a/clang/include/clang/Basic/riscv_sifive_vector.td
+++ b/clang/include/clang/Basic/riscv_sifive_vector.td
@@ -19,10 +19,10 @@ include "riscv_vector_common.td"
 //===----------------------------------------------------------------------===//
 
 class VCIXSuffix<string range> {
-  list<string> suffix = !cond(!eq(range, "c"): ["8mf8", "8mf4", "8mf2", "8m1", "8m2", "8m4", "8m8"],
-                              !eq(range, "s"): ["16mf4", "16mf2", "16m1", "16m2", "16m4", "16m8"],
-                              !eq(range, "i"): ["32mf2", "32m1", "32m2", "32m4", "32m8"],
-                              !eq(range, "l"): ["64m1", "64m2", "64m4", "64m8"]);
+  list<string> suffix = !cond(!eq(range, "c"):                       ["8mf8", "8mf4", "8mf2", "8m1", "8m2", "8m4", "8m8"],
+                              !or(!eq(range, "s"), !eq(range, "x")): ["16mf4", "16mf2", "16m1", "16m2", "16m4", "16m8"],
+                              !or(!eq(range, "i"), !eq(range, "f")): ["32mf2", "32m1", "32m2", "32m4", "32m8"],
+                              !or(!eq(range, "l"), !eq(range, "d")): ["64m1", "64m2", "64m4", "64m8"]);
 }
 
 class VCIXBuiltinSet<string name, string IR_name, string suffix,
@@ -47,16 +47,22 @@ multiclass VCIXBuiltinSet<string name, string IR_name, string suffix,
 
 multiclass RVVVCIXBuiltinSet<list<string> range, string prototype,
                              list<int> intrinsic_types, bit UseGPR> {
-  foreach r = range in
-    let RequiredFeatures = !if(!and(UseGPR, !eq(r, "l")),
+  foreach r = range in {
+    defvar suffix = !if(!or(!eq(r, "xf"), !eq(r, "d")), "Fv", "Uv");
+    let RequiredFeatures = !if(!and(UseGPR, !or(!eq(r, "l"), !eq(r, "d"))),
                                ["Xsfvcp", "RV64"], ["Xsfvcp"]) in
-      defm : VCIXBuiltinSet<NAME, NAME, "Uv", prototype, r, intrinsic_types>;
+      defm : VCIXBuiltinSet<NAME, NAME, suffix, prototype, r, intrinsic_types>;
+  }
 }
 
 multiclass RVVVCIXBuiltinSetWVType<list<string> range, string prototype,
                              list<int> intrinsic_types, bit UseGPR> {
-  foreach r = range in
-    let RequiredFeatures = !if(!and(UseGPR, !eq(r, "l")),
+  foreach r = range in {
+    defvar t = !cond(!eq(r, "x"): "f",
+                     !eq(r, "f"): "f",
+                     !eq(r, "d"): "f",
+                     !ne(r, ""): "u");
+    let RequiredFeatures = !if(!and(UseGPR, !or(!eq(r, "l"), !eq(r, "d"))),
                                ["Xsfvcp", "RV64"], ["Xsfvcp"]) in
       // These intrinsics don't have any vector types in the output and inputs,
       // but we still need to add vetvli for them. So we encode different
@@ -67,8 +73,9 @@ multiclass RVVVCIXBuiltinSetWVType<list<string> range, string prototype,
         // Log2LMUL to zero.  Otherwise the RISCVVEmitter will expand
         // lots of redundant intrinsic but have same names.
         let Log2LMUL = [0] in
-          def : VCIXBuiltinSet<NAME # "_u" # s, NAME # "_e" # s,
+          def : VCIXBuiltinSet<NAME # "_" # t # s, NAME # "_e" # s,
                                "", prototype, r, intrinsic_types>;
+  }
 }
 
 let SupportOverloading = false in {
@@ -92,6 +99,29 @@ let SupportOverloading = false in {
   defm sf_vc_v_ivv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUvKz", [-1, 0, 3], UseGPR=0>;
   defm sf_vc_v_vvv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUvUv", [-1, 0, 3], UseGPR=0>;
   defm sf_vc_v_fvv : RVVVCIXBuiltinSet<["si",  "l"], "UvKzUvUvFe", [-1, 0, 3], UseGPR=0>;
+
+  // Float
+  defm sf_vc_x_se  : RVVVCIXBuiltinSetWVType<["x", "f", "d"], "0KzKzKzUe", [0, 3], UseGPR=1>;
+  defm sf_vc_i_se  : RVVVCIXBuiltinSetWVType<["x", "f", "d"], "0KzKzKzKz", [2, 3], UseGPR=0>;
+  defm sf_vc_xv    : RVVVCIXBuiltinSet<["xf", "d"], "0KzKzFvUe",  [0, 2, 3],  UseGPR=1>;
+  defm sf_vc_iv    : RVVVCIXBuiltinSet<["xf", "d"], "0KzKzFvKz",  [0, 2, 3],  UseGPR=0>;
+  defm sf_vc_vv    : RVVVCIXBuiltinSet<["xf", "d"], "0KzKzFvFv",  [0, 2, 3],  UseGPR=0>;
+  defm sf_vc_fv    : RVVVCIXBuiltinSet<["xf",  "d"], "0KzKzFvFe",  [0, 2, 3],  UseGPR=0>;
+  defm sf_vc_xvv   : RVVVCIXBuiltinSet<["xf", "d"], "0KzFvFvUe",  [0, 1, 3],  UseGPR=1>;
+  defm sf_vc_ivv   : RVVVCIXBuiltinSet<["xf", "d"], "0KzFvFvKz",  [0, 1, 3],  UseGPR=0>;
+  defm sf_vc_vvv   : RVVVCIXBuiltinSet<["xf", "d"], "0KzFvFvFv",  [0, 1, 3],  UseGPR=0>;
+  defm sf_vc_fvv   : RVVVCIXBuiltinSet<["xf",  "d"], "0KzFvFvFe",  [0, 1, 3],  UseGPR=0>;
+  defm sf_vc_v_x   : RVVVCIXBuiltinSet<["xf", "d"], "FvKzKzUe",   [-1, 1, 2], UseGPR=1>;
+  defm sf_vc_v_i   : RVVVCIXBuiltinSet<["xf", "d"], "FvKzKzKz",   [-1, 1, 2], UseGPR=0>;
+  defm sf_vc_v_xv  : RVVVCIXBuiltinSet<["xf", "d"], "FvKzFvUe",   [-1, 0, 2], UseGPR=1>;
+  defm sf_vc_v_iv  : RVVVCIXBuiltinSet<["xf", "d"], "FvKzFvKz",   [-1, 0, 2], UseGPR=0>;
+  defm sf_vc_v_vv  : RVVVCIXBuiltinSet<["xf", "d"], "FvKzFvFv",   [-1, 0, 2], UseGPR=0>;
+  defm sf_vc_v_fv  : RVVVCIXBuiltinSet<["xf",  "d"], "FvKzFvFe",   [-1, 0, 2], UseGPR=0>;
+  defm sf_vc_v_xvv : RVVVCIXBuiltinSet<["xf", "d"], "FvKzFvFvUe", [-1, 0, 3], UseGPR=1>;
+  defm sf_vc_v_ivv : RVVVCIXBuiltinSet<["xf", "d"], "FvKzFvFvKz", [-1, 0, 3], UseGPR=0>;
+  defm sf_vc_v_vvv : RVVVCIXBuiltinSet<["xf", "d"], "FvKzFvFvFv", [-1, 0, 3], UseGPR=0>;
+  defm sf_vc_v_fvv : RVVVCIXBuiltinSet<["xf",  "d"], "FvKzFvFvFe", [-1, 0, 3], UseGPR=0>;
+
   let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
     defm sf_vc_xvw   : RVVVCIXBuiltinSet<["csi"], "0KzUwUvUe",  [0, 1, 2, 3],  UseGPR=1>;
     defm sf_vc_ivw   : RVVVCIXBuiltinSet<["csi"], "0KzUwUvKz",  [0, 1, 2, 3],  UseGPR=0>;
@@ -102,4 +132,16 @@ let SupportOverloading = false in {
     defm sf_vc_v_vvw : RVVVCIXBuiltinSet<["csi"], "UwKzUwUvUv", [-1, 0, 2, 3], UseGPR=0>;
     defm sf_vc_v_fvw : RVVVCIXBuiltinSet<["si"],  "UwKzUwUvFe", [-1, 0, 2, 3], UseGPR=0>;
   }
+
+  // Float
+  let Log2LMUL = [-2, -1, 0, 1, 2] in {
+    defm sf_vc_xvw   : RVVVCIXBuiltinSet<["xf"], "0KzFwFvUe",  [0, 1, 2, 3],  UseGPR=1>;
+    defm sf_vc_ivw   : RVVVCIXBuiltinSet<["xf"], "0KzFwFvKz",  [0, 1, 2, 3],  UseGPR=0>;
+    defm sf_vc_vvw   : RVVVCIXBuiltinSet<["xf"], "0KzFwFvFv",  [0, 1, 2, 3],  UseGPR=0>;
+    defm sf_vc_fvw   : RVVVCIXBuiltinSet<["xf"],  "0KzFwFvFe",  [0, 1, 2, 3],  UseGPR=0>;
+    defm sf_vc_v_xvw : RVVVCIXBuiltinSet<["xf"], "FwKzFwFvUe", [-1, 0, 2, 3], UseGPR=1>;
+    defm sf_vc_v_ivw : RVVVCIXBuiltinSet<["xf"], "FwKzFwFvKz", [-1, 0, 2, 3], UseGPR=0>;
+    defm sf_vc_v_vvw : RVVVCIXBuiltinSet<["xf"], "FwKzFwFvFv", [-1, 0, 2, 3], UseGPR=0>;
+    defm sf_vc_v_fvw : RVVVCIXBuiltinSet<["xf"],  "FwKzFwFvFe", [-1, 0, 2, 3], UseGPR=0>;
+  }
 }
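
Judging from the VCIXSuffix change above and the autogenerated tests further down, the new range letters map x to f16, f to f32, and d to f64, and the "Fv"/"Fw" prototype letters select floating-point vector operands, with the f16 variants gated on zvfh per the updated RUN lines. A hand-written sketch of the resulting f16 intrinsics, under those assumptions (not emitter output):

#include <sifive_vector.h>

// Assumes the test flags (+v +zvfh +xsfvcp); the opcode-field values are
// guesses mirroring the i64 3 / i64 31 / i64 10 constants in the CHECK lines.
#define p27_26 (0b11)
#define p24_20 (0b11111)
#define p11_7  (0b11111)
#define simm5  (10)

vfloat16m1_t demo_f16(uint16_t rs1, size_t vl) {
  // Range "x" selects an f16 element type; rs1 stays an integer GPR value.
  __riscv_sf_vc_x_se_f16m1(p27_26, p24_20, p11_7, rs1, vl);
  // Immediate form: the "Fv" return type yields a vfloat16m1_t result.
  return __riscv_sf_vc_v_i_se_f16m1(p27_26, p24_20, simm5, vl);
}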
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-x-rv64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-x-rv64.c
index fe6972dc7942f16..50af89e24e33b62 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-x-rv64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-x-rv64.c
@@ -1,6 +1,6 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <sifive_vector.h>
 
@@ -115,3 +115,111 @@ vuint64m4_t test_sf_vc_v_x_u64m4(uint64_t rs1, size_t vl) {
 vuint64m8_t test_sf_vc_v_x_u64m8(uint64_t rs1, size_t vl) {
   return __riscv_sf_vc_v_x_u64m8(p27_26, p24_20, rs1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e64m1.i64.i64.i64(i64 3, i64 31, i64 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_f64m1(uint64_t rs1, size_t vl) {
+  __riscv_sf_vc_x_se_f64m1(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e64m2.i64.i64.i64(i64 3, i64 31, i64 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_f64m2(uint64_t rs1, size_t vl) {
+  __riscv_sf_vc_x_se_f64m2(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e64m4.i64.i64.i64(i64 3, i64 31, i64 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_f64m4(uint64_t rs1, size_t vl) {
+  __riscv_sf_vc_x_se_f64m4(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e64m8.i64.i64.i64(i64 3, i64 31, i64 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_f64m8(uint64_t rs1, size_t vl) {
+  __riscv_sf_vc_x_se_f64m8(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.x.se.nxv1f64.i64.i64.i64(i64 3, i64 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_x_se_f64m1(uint64_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_se_f64m1(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.x.se.nxv2f64.i64.i64.i64(i64 3, i64 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_x_se_f64m2(uint64_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_se_f64m2(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.x.se.nxv4f64.i64.i64.i64(i64 3, i64 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_x_se_f64m4(uint64_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_se_f64m4(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.x.se.nxv8f64.i64.i64.i64(i64 3, i64 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_x_se_f64m8(uint64_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_se_f64m8(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.x.nxv1f64.i64.i64.i64(i64 3, i64 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_x_f64m1(uint64_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_f64m1(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.x.nxv2f64.i64.i64.i64(i64 3, i64 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_x_f64m2(uint64_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_f64m2(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.x.nxv4f64.i64.i64.i64(i64 3, i64 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_x_f64m4(uint64_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_f64m4(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.x.nxv8f64.i64.i64.i64(i64 3, i64 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_x_f64m8(uint64_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_f64m8(p27_26, p24_20, rs1, vl);
+}
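
Note the file split above: per the RequiredFeatures change in the .td hunk, the f64 forms that pass rs1 in a GPR (sf_vc_x / sf_vc_v_x with a uint64_t operand) are RV64-only and live in the -rv64.c test, while the immediate forms in the next file compile for both riscv32 and riscv64. A small sketch of the distinction, reusing the assumed opcode macros from the earlier example:

#include <sifive_vector.h>

#define p27_26 (0b11)
#define p24_20 (0b11111)
#define p11_7  (0b11111)
#define simm5  (10)

// Works for either XLEN: the f64 immediate form needs no 64-bit scalar register.
void demo_any_xlen(size_t vl) {
  __riscv_sf_vc_i_se_f64m1(p27_26, p24_20, p11_7, simm5, vl);
}

#if __riscv_xlen == 64
// RV64 only: rs1 is a uint64_t carried in a scalar register.
void demo_rv64_only(uint64_t rs1, size_t vl) {
  __riscv_sf_vc_x_se_f64m1(p27_26, p24_20, p11_7, rs1, vl);
}
#endif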
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-x.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-x.c
index bac7e019c35b199..84fb59c01e36652 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-x.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-x.c
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv32 -target-feature +v -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv32 -target-feature +v -target-feature +zvfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <sifive_vector.h>
 
@@ -1689,3 +1689,1095 @@ vuint64m4_t test_sf_vc_v_i_u64m4(size_t vl) {
 vuint64m8_t test_sf_vc_v_i_u64m8(size_t vl) {
   return __riscv_sf_vc_v_i_u64m8(p27_26, p24_20, simm5, vl);
 }
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16mf4.i32.i16.i32(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16mf4.i64.i16.i64(i64 3, i64 31, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_f16mf4(uint16_t rs1, size_t vl) {
+  __riscv_sf_vc_x_se_f16mf4(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16mf2.i32.i16.i32(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16mf2.i64.i16.i64(i64 3, i64 31, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_f16mf2(uint16_t rs1, size_t vl) {
+  __riscv_sf_vc_x_se_f16mf2(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16m1.i32.i16.i32(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16m1.i64.i16.i64(i64 3, i64 31, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_f16m1(uint16_t rs1, size_t vl) {
+  __riscv_sf_vc_x_se_f16m1(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16m2.i32.i16.i32(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16m2.i64.i16.i64(i64 3, i64 31, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_f16m2(uint16_t rs1, size_t vl) {
+  __riscv_sf_vc_x_se_f16m2(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16m4.i32.i16.i32(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16m4.i64.i16.i64(i64 3, i64 31, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_f16m4(uint16_t rs1, size_t vl) {
+  __riscv_sf_vc_x_se_f16m4(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16m8.i32.i16.i32(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16m8.i64.i16.i64(i64 3, i64 31, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_f16m8(uint16_t rs1, size_t vl) {
+  __riscv_sf_vc_x_se_f16m8(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32mf2.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32mf2.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_f32mf2(uint32_t rs1, size_t vl) {
+  __riscv_sf_vc_x_se_f32mf2(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32m1.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32m1.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_f32m1(uint32_t rs1, size_t vl) {
+  __riscv_sf_vc_x_se_f32m1(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32m2.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32m2.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_f32m2(uint32_t rs1, size_t vl) {
+  __riscv_sf_vc_x_se_f32m2(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32m4.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32m4.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_f32m4(uint32_t rs1, size_t vl) {
+  __riscv_sf_vc_x_se_f32m4(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32m8.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32m8.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_f32m8(uint32_t rs1, size_t vl) {
+  __riscv_sf_vc_x_se_f32m8(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.x.se.nxv1f16.i32.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.x.se.nxv1f16.i64.i16.i64(i64 3, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vc_v_x_se_f16mf4(uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_se_f16mf4(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.x.se.nxv2f16.i32.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.x.se.nxv2f16.i64.i16.i64(i64 3, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vc_v_x_se_f16mf2(uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_se_f16mf2(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.x.se.nxv4f16.i32.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.x.se.nxv4f16.i64.i16.i64(i64 3, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vc_v_x_se_f16m1(uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_se_f16m1(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.x.se.nxv8f16.i32.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.x.se.nxv8f16.i64.i16.i64(i64 3, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vc_v_x_se_f16m2(uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_se_f16m2(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.x.se.nxv16f16.i32.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.x.se.nxv16f16.i64.i16.i64(i64 3, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vc_v_x_se_f16m4(uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_se_f16m4(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.x.se.nxv32f16.i32.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.x.se.nxv32f16.i64.i16.i64(i64 3, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vc_v_x_se_f16m8(uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_se_f16m8(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.x.se.nxv1f32.i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.x.se.nxv1f32.i64.i32.i64(i64 3, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_x_se_f32mf2(uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_se_f32mf2(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.x.se.nxv2f32.i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.x.se.nxv2f32.i64.i32.i64(i64 3, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_x_se_f32m1(uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_se_f32m1(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.x.se.nxv4f32.i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.x.se.nxv4f32.i64.i32.i64(i64 3, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_x_se_f32m2(uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_se_f32m2(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.x.se.nxv8f32.i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.x.se.nxv8f32.i64.i32.i64(i64 3, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_x_se_f32m4(uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_se_f32m4(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.x.se.nxv16f32.i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.x.se.nxv16f32.i64.i32.i64(i64 3, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_x_se_f32m8(uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_se_f32m8(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.x.nxv1f16.i32.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.x.nxv1f16.i64.i16.i64(i64 3, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vc_v_x_f16mf4(uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_f16mf4(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.x.nxv2f16.i32.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.x.nxv2f16.i64.i16.i64(i64 3, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vc_v_x_f16mf2(uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_f16mf2(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.x.nxv4f16.i32.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.x.nxv4f16.i64.i16.i64(i64 3, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vc_v_x_f16m1(uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_f16m1(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.x.nxv8f16.i32.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.x.nxv8f16.i64.i16.i64(i64 3, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vc_v_x_f16m2(uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_f16m2(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.x.nxv16f16.i32.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.x.nxv16f16.i64.i16.i64(i64 3, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vc_v_x_f16m4(uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_f16m4(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.x.nxv32f16.i32.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.x.nxv32f16.i64.i16.i64(i64 3, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vc_v_x_f16m8(uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_f16m8(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.x.nxv1f32.i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.x.nxv1f32.i64.i32.i64(i64 3, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_x_f32mf2(uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_f32mf2(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.x.nxv2f32.i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.x.nxv2f32.i64.i32.i64(i64 3, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_x_f32m1(uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_f32m1(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.x.nxv4f32.i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.x.nxv4f32.i64.i32.i64(i64 3, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_x_f32m2(uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_f32m2(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.x.nxv8f32.i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.x.nxv8f32.i64.i32.i64(i64 3, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_x_f32m4(uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_f32m4(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.x.nxv16f32.i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.x.nxv16f32.i64.i32.i64(i64 3, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_x_f32m8(uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_x_f32m8(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16mf4.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16mf4.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_f16mf4(size_t vl) {
+  __riscv_sf_vc_i_se_f16mf4(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16mf2.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16mf2.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_f16mf2(size_t vl) {
+  __riscv_sf_vc_i_se_f16mf2(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16m1.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16m1.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_f16m1(size_t vl) {
+  __riscv_sf_vc_i_se_f16m1(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16m2.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16m2.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_f16m2(size_t vl) {
+  __riscv_sf_vc_i_se_f16m2(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16m4.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16m4.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_f16m4(size_t vl) {
+  __riscv_sf_vc_i_se_f16m4(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16m8.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16m8.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_f16m8(size_t vl) {
+  __riscv_sf_vc_i_se_f16m8(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32mf2.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32mf2.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_f32mf2(size_t vl) {
+  __riscv_sf_vc_i_se_f32mf2(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32m1.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32m1.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_f32m1(size_t vl) {
+  __riscv_sf_vc_i_se_f32m1(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32m2.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32m2.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_f32m2(size_t vl) {
+  __riscv_sf_vc_i_se_f32m2(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32m4.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32m4.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_f32m4(size_t vl) {
+  __riscv_sf_vc_i_se_f32m4(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32m8.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32m8.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_f32m8(size_t vl) {
+  __riscv_sf_vc_i_se_f32m8(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e64m1.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e64m1.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_f64m1(size_t vl) {
+  __riscv_sf_vc_i_se_f64m1(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e64m2.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e64m2.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_f64m2(size_t vl) {
+  __riscv_sf_vc_i_se_f64m2(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e64m4.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e64m4.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_f64m4(size_t vl) {
+  __riscv_sf_vc_i_se_f64m4(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e64m8.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e64m8.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_f64m8(size_t vl) {
+  __riscv_sf_vc_i_se_f64m8(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.i.se.nxv1f16.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.i.se.nxv1f16.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vc_v_i_se_f16mf4(size_t vl) {
+  return __riscv_sf_vc_v_i_se_f16mf4(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.i.se.nxv2f16.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.i.se.nxv2f16.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vc_v_i_se_f16mf2(size_t vl) {
+  return __riscv_sf_vc_v_i_se_f16mf2(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.i.se.nxv4f16.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.i.se.nxv4f16.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vc_v_i_se_f16m1(size_t vl) {
+  return __riscv_sf_vc_v_i_se_f16m1(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.i.se.nxv8f16.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.i.se.nxv8f16.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vc_v_i_se_f16m2(size_t vl) {
+  return __riscv_sf_vc_v_i_se_f16m2(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.i.se.nxv16f16.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.i.se.nxv16f16.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vc_v_i_se_f16m4(size_t vl) {
+  return __riscv_sf_vc_v_i_se_f16m4(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.i.se.nxv32f16.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.i.se.nxv32f16.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vc_v_i_se_f16m8(size_t vl) {
+  return __riscv_sf_vc_v_i_se_f16m8(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.i.se.nxv1f32.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.i.se.nxv1f32.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_i_se_f32mf2(size_t vl) {
+  return __riscv_sf_vc_v_i_se_f32mf2(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.i.se.nxv2f32.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.i.se.nxv2f32.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_i_se_f32m1(size_t vl) {
+  return __riscv_sf_vc_v_i_se_f32m1(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.i.se.nxv4f32.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.i.se.nxv4f32.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_i_se_f32m2(size_t vl) {
+  return __riscv_sf_vc_v_i_se_f32m2(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.i.se.nxv8f32.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.i.se.nxv8f32.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_i_se_f32m4(size_t vl) {
+  return __riscv_sf_vc_v_i_se_f32m4(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.i.se.nxv16f32.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.i.se.nxv16f32.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_i_se_f32m8(size_t vl) {
+  return __riscv_sf_vc_v_i_se_f32m8(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.i.se.nxv1f64.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.i.se.nxv1f64.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_i_se_f64m1(size_t vl) {
+  return __riscv_sf_vc_v_i_se_f64m1(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.i.se.nxv2f64.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.i.se.nxv2f64.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_i_se_f64m2(size_t vl) {
+  return __riscv_sf_vc_v_i_se_f64m2(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.i.se.nxv4f64.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.i.se.nxv4f64.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_i_se_f64m4(size_t vl) {
+  return __riscv_sf_vc_v_i_se_f64m4(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.i.se.nxv8f64.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.i.se.nxv8f64.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_i_se_f64m8(size_t vl) {
+  return __riscv_sf_vc_v_i_se_f64m8(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.i.nxv1f16.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.i.nxv1f16.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vc_v_i_f16mf4(size_t vl) {
+  return __riscv_sf_vc_v_i_f16mf4(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.i.nxv2f16.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.i.nxv2f16.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vc_v_i_f16mf2(size_t vl) {
+  return __riscv_sf_vc_v_i_f16mf2(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.i.nxv4f16.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.i.nxv4f16.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vc_v_i_f16m1(size_t vl) {
+  return __riscv_sf_vc_v_i_f16m1(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.i.nxv8f16.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.i.nxv8f16.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vc_v_i_f16m2(size_t vl) {
+  return __riscv_sf_vc_v_i_f16m2(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.i.nxv16f16.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.i.nxv16f16.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vc_v_i_f16m4(size_t vl) {
+  return __riscv_sf_vc_v_i_f16m4(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.i.nxv32f16.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.i.nxv32f16.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vc_v_i_f16m8(size_t vl) {
+  return __riscv_sf_vc_v_i_f16m8(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.i.nxv1f32.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.i.nxv1f32.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_i_f32mf2(size_t vl) {
+  return __riscv_sf_vc_v_i_f32mf2(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.i.nxv2f32.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.i.nxv2f32.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_i_f32m1(size_t vl) {
+  return __riscv_sf_vc_v_i_f32m1(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.i.nxv4f32.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.i.nxv4f32.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_i_f32m2(size_t vl) {
+  return __riscv_sf_vc_v_i_f32m2(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.i.nxv8f32.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.i.nxv8f32.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_i_f32m4(size_t vl) {
+  return __riscv_sf_vc_v_i_f32m4(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.i.nxv16f32.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.i.nxv16f32.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_i_f32m8(size_t vl) {
+  return __riscv_sf_vc_v_i_f32m8(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.i.nxv1f64.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.i.nxv1f64.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_i_f64m1(size_t vl) {
+  return __riscv_sf_vc_v_i_f64m1(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.i.nxv2f64.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.i.nxv2f64.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_i_f64m2(size_t vl) {
+  return __riscv_sf_vc_v_i_f64m2(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.i.nxv4f64.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.i.nxv4f64.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_i_f64m4(size_t vl) {
+  return __riscv_sf_vc_v_i_f64m4(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.i.nxv8f64.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.i.nxv8f64.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_i_f64m8(size_t vl) {
+  return __riscv_sf_vc_v_i_f64m8(p27_26, p24_20, simm5, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xv-rv64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xv-rv64.c
index 39704189ae45ed0..fb3a6b85b8589f5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xv-rv64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xv-rv64.c
@@ -114,3 +114,111 @@ vuint64m4_t test_sf_vc_v_xv_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
 vuint64m8_t test_sf_vc_v_xv_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
   return __riscv_sf_vc_v_xv_u64m8(p27_26, vs2, rs1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i64.nxv1f64.i64.i64(i64 3, i64 31, <vscale x 1 x double> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_f64m1(vfloat64m1_t vs2, uint64_t rs1, size_t vl) {
+  __riscv_sf_vc_xv_se_f64m1(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i64.nxv2f64.i64.i64(i64 3, i64 31, <vscale x 2 x double> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_f64m2(vfloat64m2_t vs2, uint64_t rs1, size_t vl) {
+  __riscv_sf_vc_xv_se_f64m2(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i64.nxv4f64.i64.i64(i64 3, i64 31, <vscale x 4 x double> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_f64m4(vfloat64m4_t vs2, uint64_t rs1, size_t vl) {
+  __riscv_sf_vc_xv_se_f64m4(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i64.nxv8f64.i64.i64(i64 3, i64 31, <vscale x 8 x double> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_f64m8(vfloat64m8_t vs2, uint64_t rs1, size_t vl) {
+  __riscv_sf_vc_xv_se_f64m8(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.xv.se.nxv1f64.i64.i64.i64(i64 3, <vscale x 1 x double> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_xv_se_f64m1(vfloat64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_se_f64m1(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.xv.se.nxv2f64.i64.i64.i64(i64 3, <vscale x 2 x double> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_xv_se_f64m2(vfloat64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_se_f64m2(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.xv.se.nxv4f64.i64.i64.i64(i64 3, <vscale x 4 x double> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_xv_se_f64m4(vfloat64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_se_f64m4(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.xv.se.nxv8f64.i64.i64.i64(i64 3, <vscale x 8 x double> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_xv_se_f64m8(vfloat64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_se_f64m8(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.xv.nxv1f64.i64.i64.i64(i64 3, <vscale x 1 x double> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_xv_f64m1(vfloat64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_f64m1(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.xv.nxv2f64.i64.i64.i64(i64 3, <vscale x 2 x double> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_xv_f64m2(vfloat64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_f64m2(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.xv.nxv4f64.i64.i64.i64(i64 3, <vscale x 4 x double> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_xv_f64m4(vfloat64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_f64m4(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.xv.nxv8f64.i64.i64.i64(i64 3, <vscale x 8 x double> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_xv_f64m8(vfloat64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_f64m8(p27_26, vs2, rs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xv.c
index f37748ac3020b99..59aacbb80e5bcf0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xv.c
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv32 -target-feature +v -target-feature +zfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv32 -target-feature +v -target-feature +zvfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <sifive_vector.h>
 
@@ -3243,3 +3243,2349 @@ vuint64m4_t test_sf_vc_v_fv_u64m4(vuint64m4_t vs2, double fs1, size_t vl) {
 vuint64m8_t test_sf_vc_v_fv_u64m8(vuint64m8_t vs2, double fs1, size_t vl) {
   return __riscv_sf_vc_v_fv_u64m8(p26, vs2, fs1, vl);
 }
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i32.nxv1f16.nxv1f16.i32(i32 3, i32 31, <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i64.nxv1f16.nxv1f16.i64(i64 3, i64 31, <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  __riscv_sf_vc_vv_se_f16mf4(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i32.nxv2f16.nxv2f16.i32(i32 3, i32 31, <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i64.nxv2f16.nxv2f16.i64(i64 3, i64 31, <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  __riscv_sf_vc_vv_se_f16mf2(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i32.nxv4f16.nxv4f16.i32(i32 3, i32 31, <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i64.nxv4f16.nxv4f16.i64(i64 3, i64 31, <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  __riscv_sf_vc_vv_se_f16m1(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i32.nxv8f16.nxv8f16.i32(i32 3, i32 31, <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i64.nxv8f16.nxv8f16.i64(i64 3, i64 31, <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  __riscv_sf_vc_vv_se_f16m2(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i32.nxv16f16.nxv16f16.i32(i32 3, i32 31, <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i64.nxv16f16.nxv16f16.i64(i64 3, i64 31, <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  __riscv_sf_vc_vv_se_f16m4(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i32.nxv32f16.nxv32f16.i32(i32 3, i32 31, <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i64.nxv32f16.nxv32f16.i64(i64 3, i64 31, <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  __riscv_sf_vc_vv_se_f16m8(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i32.nxv1f32.nxv1f32.i32(i32 3, i32 31, <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i64.nxv1f32.nxv1f32.i64(i64 3, i64 31, <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  __riscv_sf_vc_vv_se_f32mf2(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i32.nxv2f32.nxv2f32.i32(i32 3, i32 31, <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i64.nxv2f32.nxv2f32.i64(i64 3, i64 31, <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  __riscv_sf_vc_vv_se_f32m1(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i32.nxv4f32.nxv4f32.i32(i32 3, i32 31, <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i64.nxv4f32.nxv4f32.i64(i64 3, i64 31, <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  __riscv_sf_vc_vv_se_f32m2(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i32.nxv8f32.nxv8f32.i32(i32 3, i32 31, <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i64.nxv8f32.nxv8f32.i64(i64 3, i64 31, <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  __riscv_sf_vc_vv_se_f32m4(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i32.nxv16f32.nxv16f32.i32(i32 3, i32 31, <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i64.nxv16f32.nxv16f32.i64(i64 3, i64 31, <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  __riscv_sf_vc_vv_se_f32m8(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i32.nxv1f64.nxv1f64.i32(i32 3, i32 31, <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i64.nxv1f64.nxv1f64.i64(i64 3, i64 31, <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  __riscv_sf_vc_vv_se_f64m1(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i32.nxv2f64.nxv2f64.i32(i32 3, i32 31, <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i64.nxv2f64.nxv2f64.i64(i64 3, i64 31, <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  __riscv_sf_vc_vv_se_f64m2(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i32.nxv4f64.nxv4f64.i32(i32 3, i32 31, <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i64.nxv4f64.nxv4f64.i64(i64 3, i64 31, <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  __riscv_sf_vc_vv_se_f64m4(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i32.nxv8f64.nxv8f64.i32(i32 3, i32 31, <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.i64.nxv8f64.nxv8f64.i64(i64 3, i64 31, <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  __riscv_sf_vc_vv_se_f64m8(p27_26, p11_7, vs2, vs1, vl);
+}
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vv.se.nxv1f16.i32.nxv1f16.i32(i32 3, <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vv.se.nxv1f16.i64.nxv1f16.i64(i64 3, <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vc_v_vv_se_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_se_f16mf4(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vv.se.nxv2f16.i32.nxv2f16.i32(i32 3, <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vv.se.nxv2f16.i64.nxv2f16.i64(i64 3, <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vc_v_vv_se_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_se_f16mf2(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vv.se.nxv4f16.i32.nxv4f16.i32(i32 3, <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vv.se.nxv4f16.i64.nxv4f16.i64(i64 3, <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vc_v_vv_se_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_se_f16m1(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vv.se.nxv8f16.i32.nxv8f16.i32(i32 3, <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vv.se.nxv8f16.i64.nxv8f16.i64(i64 3, <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vc_v_vv_se_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_se_f16m2(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vv.se.nxv16f16.i32.nxv16f16.i32(i32 3, <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vv.se.nxv16f16.i64.nxv16f16.i64(i64 3, <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vc_v_vv_se_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_se_f16m4(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vv.se.nxv32f16.i32.nxv32f16.i32(i32 3, <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vv.se.nxv32f16.i64.nxv32f16.i64(i64 3, <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vc_v_vv_se_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_se_f16m8(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vv.se.nxv1f32.i32.nxv1f32.i32(i32 3, <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vv.se.nxv1f32.i64.nxv1f32.i64(i64 3, <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_vv_se_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_se_f32mf2(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vv.se.nxv2f32.i32.nxv2f32.i32(i32 3, <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vv.se.nxv2f32.i64.nxv2f32.i64(i64 3, <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_vv_se_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_se_f32m1(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vv.se.nxv4f32.i32.nxv4f32.i32(i32 3, <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vv.se.nxv4f32.i64.nxv4f32.i64(i64 3, <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_vv_se_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_se_f32m2(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vv.se.nxv8f32.i32.nxv8f32.i32(i32 3, <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vv.se.nxv8f32.i64.nxv8f32.i64(i64 3, <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_vv_se_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_se_f32m4(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vv.se.nxv16f32.i32.nxv16f32.i32(i32 3, <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vv.se.nxv16f32.i64.nxv16f32.i64(i64 3, <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_vv_se_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_se_f32m8(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vv.se.nxv1f64.i32.nxv1f64.i32(i32 3, <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vv.se.nxv1f64.i64.nxv1f64.i64(i64 3, <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_vv_se_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_se_f64m1(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vv.se.nxv2f64.i32.nxv2f64.i32(i32 3, <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vv.se.nxv2f64.i64.nxv2f64.i64(i64 3, <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_vv_se_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_se_f64m2(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vv.se.nxv4f64.i32.nxv4f64.i32(i32 3, <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vv.se.nxv4f64.i64.nxv4f64.i64(i64 3, <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_vv_se_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_se_f64m4(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vv.se.nxv8f64.i32.nxv8f64.i32(i32 3, <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vv.se.nxv8f64.i64.nxv8f64.i64(i64 3, <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_vv_se_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_se_f64m8(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vv.nxv1f16.i32.nxv1f16.i32(i32 3, <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vv.nxv1f16.i64.nxv1f16.i64(i64 3, <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vc_v_vv_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_f16mf4(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vv.nxv2f16.i32.nxv2f16.i32(i32 3, <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vv.nxv2f16.i64.nxv2f16.i64(i64 3, <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vc_v_vv_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_f16mf2(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vv.nxv4f16.i32.nxv4f16.i32(i32 3, <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vv.nxv4f16.i64.nxv4f16.i64(i64 3, <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vc_v_vv_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_f16m1(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vv.nxv8f16.i32.nxv8f16.i32(i32 3, <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vv.nxv8f16.i64.nxv8f16.i64(i64 3, <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vc_v_vv_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_f16m2(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vv.nxv16f16.i32.nxv16f16.i32(i32 3, <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vv.nxv16f16.i64.nxv16f16.i64(i64 3, <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vc_v_vv_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_f16m4(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vv.nxv32f16.i32.nxv32f16.i32(i32 3, <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vv.nxv32f16.i64.nxv32f16.i64(i64 3, <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vc_v_vv_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_f16m8(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vv.nxv1f32.i32.nxv1f32.i32(i32 3, <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vv.nxv1f32.i64.nxv1f32.i64(i64 3, <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_vv_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_f32mf2(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vv.nxv2f32.i32.nxv2f32.i32(i32 3, <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vv.nxv2f32.i64.nxv2f32.i64(i64 3, <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_vv_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_f32m1(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vv.nxv4f32.i32.nxv4f32.i32(i32 3, <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vv.nxv4f32.i64.nxv4f32.i64(i64 3, <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_vv_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_f32m2(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vv.nxv8f32.i32.nxv8f32.i32(i32 3, <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vv.nxv8f32.i64.nxv8f32.i64(i64 3, <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_vv_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_f32m4(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vv.nxv16f32.i32.nxv16f32.i32(i32 3, <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vv.nxv16f32.i64.nxv16f32.i64(i64 3, <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_vv_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_f32m8(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vv.nxv1f64.i32.nxv1f64.i32(i32 3, <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vv.nxv1f64.i64.nxv1f64.i64(i64 3, <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_vv_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_f64m1(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vv.nxv2f64.i32.nxv2f64.i32(i32 3, <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vv.nxv2f64.i64.nxv2f64.i64(i64 3, <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_vv_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_f64m2(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vv.nxv4f64.i32.nxv4f64.i32(i32 3, <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vv.nxv4f64.i64.nxv4f64.i64(i64 3, <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_vv_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_f64m4(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vv.nxv8f64.i32.nxv8f64.i32(i32 3, <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vv.nxv8f64.i64.nxv8f64.i64(i64 3, <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_vv_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vv_f64m8(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i32.nxv1f16.i16.i32(i32 3, i32 31, <vscale x 1 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i64.nxv1f16.i16.i64(i64 3, i64 31, <vscale x 1 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_f16mf4(vfloat16mf4_t vs2, uint16_t rs1, size_t vl) {
+  __riscv_sf_vc_xv_se_f16mf4(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i32.nxv2f16.i16.i32(i32 3, i32 31, <vscale x 2 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i64.nxv2f16.i16.i64(i64 3, i64 31, <vscale x 2 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_f16mf2(vfloat16mf2_t vs2, uint16_t rs1, size_t vl) {
+  __riscv_sf_vc_xv_se_f16mf2(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i32.nxv4f16.i16.i32(i32 3, i32 31, <vscale x 4 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i64.nxv4f16.i16.i64(i64 3, i64 31, <vscale x 4 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_f16m1(vfloat16m1_t vs2, uint16_t rs1, size_t vl) {
+  __riscv_sf_vc_xv_se_f16m1(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i32.nxv8f16.i16.i32(i32 3, i32 31, <vscale x 8 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i64.nxv8f16.i16.i64(i64 3, i64 31, <vscale x 8 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_f16m2(vfloat16m2_t vs2, uint16_t rs1, size_t vl) {
+  __riscv_sf_vc_xv_se_f16m2(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i32.nxv16f16.i16.i32(i32 3, i32 31, <vscale x 16 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i64.nxv16f16.i16.i64(i64 3, i64 31, <vscale x 16 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_f16m4(vfloat16m4_t vs2, uint16_t rs1, size_t vl) {
+  __riscv_sf_vc_xv_se_f16m4(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i32.nxv32f16.i16.i32(i32 3, i32 31, <vscale x 32 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i64.nxv32f16.i16.i64(i64 3, i64 31, <vscale x 32 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_f16m8(vfloat16m8_t vs2, uint16_t rs1, size_t vl) {
+  __riscv_sf_vc_xv_se_f16m8(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i32.nxv1f32.i32.i32(i32 3, i32 31, <vscale x 1 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i64.nxv1f32.i32.i64(i64 3, i64 31, <vscale x 1 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_f32mf2(vfloat32mf2_t vs2, uint32_t rs1, size_t vl) {
+  __riscv_sf_vc_xv_se_f32mf2(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i32.nxv2f32.i32.i32(i32 3, i32 31, <vscale x 2 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i64.nxv2f32.i32.i64(i64 3, i64 31, <vscale x 2 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_f32m1(vfloat32m1_t vs2, uint32_t rs1, size_t vl) {
+  __riscv_sf_vc_xv_se_f32m1(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i32.nxv4f32.i32.i32(i32 3, i32 31, <vscale x 4 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i64.nxv4f32.i32.i64(i64 3, i64 31, <vscale x 4 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_f32m2(vfloat32m2_t vs2, uint32_t rs1, size_t vl) {
+  __riscv_sf_vc_xv_se_f32m2(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i32.nxv8f32.i32.i32(i32 3, i32 31, <vscale x 8 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i64.nxv8f32.i32.i64(i64 3, i64 31, <vscale x 8 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_f32m4(vfloat32m4_t vs2, uint32_t rs1, size_t vl) {
+  __riscv_sf_vc_xv_se_f32m4(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i32.nxv16f32.i32.i32(i32 3, i32 31, <vscale x 16 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.i64.nxv16f32.i32.i64(i64 3, i64 31, <vscale x 16 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_f32m8(vfloat32m8_t vs2, uint32_t rs1, size_t vl) {
+  __riscv_sf_vc_xv_se_f32m8(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.xv.se.nxv1f16.i32.i16.i32(i32 3, <vscale x 1 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.xv.se.nxv1f16.i64.i16.i64(i64 3, <vscale x 1 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vc_v_xv_se_f16mf4(vfloat16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_se_f16mf4(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.xv.se.nxv2f16.i32.i16.i32(i32 3, <vscale x 2 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.xv.se.nxv2f16.i64.i16.i64(i64 3, <vscale x 2 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vc_v_xv_se_f16mf2(vfloat16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_se_f16mf2(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.xv.se.nxv4f16.i32.i16.i32(i32 3, <vscale x 4 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.xv.se.nxv4f16.i64.i16.i64(i64 3, <vscale x 4 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vc_v_xv_se_f16m1(vfloat16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_se_f16m1(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.xv.se.nxv8f16.i32.i16.i32(i32 3, <vscale x 8 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.xv.se.nxv8f16.i64.i16.i64(i64 3, <vscale x 8 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vc_v_xv_se_f16m2(vfloat16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_se_f16m2(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.xv.se.nxv16f16.i32.i16.i32(i32 3, <vscale x 16 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.xv.se.nxv16f16.i64.i16.i64(i64 3, <vscale x 16 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vc_v_xv_se_f16m4(vfloat16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_se_f16m4(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.xv.se.nxv32f16.i32.i16.i32(i32 3, <vscale x 32 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.xv.se.nxv32f16.i64.i16.i64(i64 3, <vscale x 32 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vc_v_xv_se_f16m8(vfloat16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_se_f16m8(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xv.se.nxv1f32.i32.i32.i32(i32 3, <vscale x 1 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xv.se.nxv1f32.i64.i32.i64(i64 3, <vscale x 1 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_xv_se_f32mf2(vfloat32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_se_f32mf2(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xv.se.nxv2f32.i32.i32.i32(i32 3, <vscale x 2 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xv.se.nxv2f32.i64.i32.i64(i64 3, <vscale x 2 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_xv_se_f32m1(vfloat32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_se_f32m1(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xv.se.nxv4f32.i32.i32.i32(i32 3, <vscale x 4 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xv.se.nxv4f32.i64.i32.i64(i64 3, <vscale x 4 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_xv_se_f32m2(vfloat32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_se_f32m2(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xv.se.nxv8f32.i32.i32.i32(i32 3, <vscale x 8 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xv.se.nxv8f32.i64.i32.i64(i64 3, <vscale x 8 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_xv_se_f32m4(vfloat32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_se_f32m4(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xv.se.nxv16f32.i32.i32.i32(i32 3, <vscale x 16 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xv.se.nxv16f32.i64.i32.i64(i64 3, <vscale x 16 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_xv_se_f32m8(vfloat32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_se_f32m8(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.xv.nxv1f16.i32.i16.i32(i32 3, <vscale x 1 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.xv.nxv1f16.i64.i16.i64(i64 3, <vscale x 1 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vc_v_xv_f16mf4(vfloat16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_f16mf4(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.xv.nxv2f16.i32.i16.i32(i32 3, <vscale x 2 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.xv.nxv2f16.i64.i16.i64(i64 3, <vscale x 2 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vc_v_xv_f16mf2(vfloat16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_f16mf2(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.xv.nxv4f16.i32.i16.i32(i32 3, <vscale x 4 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.xv.nxv4f16.i64.i16.i64(i64 3, <vscale x 4 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vc_v_xv_f16m1(vfloat16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_f16m1(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.xv.nxv8f16.i32.i16.i32(i32 3, <vscale x 8 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.xv.nxv8f16.i64.i16.i64(i64 3, <vscale x 8 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vc_v_xv_f16m2(vfloat16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_f16m2(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.xv.nxv16f16.i32.i16.i32(i32 3, <vscale x 16 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.xv.nxv16f16.i64.i16.i64(i64 3, <vscale x 16 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vc_v_xv_f16m4(vfloat16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_f16m4(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.xv.nxv32f16.i32.i16.i32(i32 3, <vscale x 32 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.xv.nxv32f16.i64.i16.i64(i64 3, <vscale x 32 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vc_v_xv_f16m8(vfloat16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_f16m8(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xv.nxv1f32.i32.i32.i32(i32 3, <vscale x 1 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xv.nxv1f32.i64.i32.i64(i64 3, <vscale x 1 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_xv_f32mf2(vfloat32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_f32mf2(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xv.nxv2f32.i32.i32.i32(i32 3, <vscale x 2 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xv.nxv2f32.i64.i32.i64(i64 3, <vscale x 2 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_xv_f32m1(vfloat32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_f32m1(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xv.nxv4f32.i32.i32.i32(i32 3, <vscale x 4 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xv.nxv4f32.i64.i32.i64(i64 3, <vscale x 4 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_xv_f32m2(vfloat32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_f32m2(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xv.nxv8f32.i32.i32.i32(i32 3, <vscale x 8 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xv.nxv8f32.i64.i32.i64(i64 3, <vscale x 8 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_xv_f32m4(vfloat32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_f32m4(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xv.nxv16f32.i32.i32.i32(i32 3, <vscale x 16 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xv.nxv16f32.i64.i32.i64(i64 3, <vscale x 16 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_xv_f32m8(vfloat32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xv_f32m8(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i32.nxv1f16.i32.i32(i32 3, i32 31, <vscale x 1 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i64.nxv1f16.i64.i64(i64 3, i64 31, <vscale x 1 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_f16mf4(vfloat16mf4_t vs2, size_t vl) {
+  __riscv_sf_vc_iv_se_f16mf4(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i32.nxv2f16.i32.i32(i32 3, i32 31, <vscale x 2 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i64.nxv2f16.i64.i64(i64 3, i64 31, <vscale x 2 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_f16mf2(vfloat16mf2_t vs2, size_t vl) {
+  __riscv_sf_vc_iv_se_f16mf2(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i32.nxv4f16.i32.i32(i32 3, i32 31, <vscale x 4 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i64.nxv4f16.i64.i64(i64 3, i64 31, <vscale x 4 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_f16m1(vfloat16m1_t vs2, size_t vl) {
+  __riscv_sf_vc_iv_se_f16m1(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i32.nxv8f16.i32.i32(i32 3, i32 31, <vscale x 8 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i64.nxv8f16.i64.i64(i64 3, i64 31, <vscale x 8 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_f16m2(vfloat16m2_t vs2, size_t vl) {
+  __riscv_sf_vc_iv_se_f16m2(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i32.nxv16f16.i32.i32(i32 3, i32 31, <vscale x 16 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i64.nxv16f16.i64.i64(i64 3, i64 31, <vscale x 16 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_f16m4(vfloat16m4_t vs2, size_t vl) {
+  __riscv_sf_vc_iv_se_f16m4(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i32.nxv32f16.i32.i32(i32 3, i32 31, <vscale x 32 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i64.nxv32f16.i64.i64(i64 3, i64 31, <vscale x 32 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_f16m8(vfloat16m8_t vs2, size_t vl) {
+  __riscv_sf_vc_iv_se_f16m8(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i32.nxv1f32.i32.i32(i32 3, i32 31, <vscale x 1 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i64.nxv1f32.i64.i64(i64 3, i64 31, <vscale x 1 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_f32mf2(vfloat32mf2_t vs2, size_t vl) {
+  __riscv_sf_vc_iv_se_f32mf2(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i32.nxv2f32.i32.i32(i32 3, i32 31, <vscale x 2 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i64.nxv2f32.i64.i64(i64 3, i64 31, <vscale x 2 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_f32m1(vfloat32m1_t vs2, size_t vl) {
+  __riscv_sf_vc_iv_se_f32m1(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i32.nxv4f32.i32.i32(i32 3, i32 31, <vscale x 4 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i64.nxv4f32.i64.i64(i64 3, i64 31, <vscale x 4 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_f32m2(vfloat32m2_t vs2, size_t vl) {
+  __riscv_sf_vc_iv_se_f32m2(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i32.nxv8f32.i32.i32(i32 3, i32 31, <vscale x 8 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i64.nxv8f32.i64.i64(i64 3, i64 31, <vscale x 8 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_f32m4(vfloat32m4_t vs2, size_t vl) {
+  __riscv_sf_vc_iv_se_f32m4(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i32.nxv16f32.i32.i32(i32 3, i32 31, <vscale x 16 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i64.nxv16f32.i64.i64(i64 3, i64 31, <vscale x 16 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_f32m8(vfloat32m8_t vs2, size_t vl) {
+  __riscv_sf_vc_iv_se_f32m8(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i32.nxv1f64.i32.i32(i32 3, i32 31, <vscale x 1 x double> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i64.nxv1f64.i64.i64(i64 3, i64 31, <vscale x 1 x double> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_f64m1(vfloat64m1_t vs2, size_t vl) {
+  __riscv_sf_vc_iv_se_f64m1(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i32.nxv2f64.i32.i32(i32 3, i32 31, <vscale x 2 x double> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i64.nxv2f64.i64.i64(i64 3, i64 31, <vscale x 2 x double> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_f64m2(vfloat64m2_t vs2, size_t vl) {
+  __riscv_sf_vc_iv_se_f64m2(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i32.nxv4f64.i32.i32(i32 3, i32 31, <vscale x 4 x double> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i64.nxv4f64.i64.i64(i64 3, i64 31, <vscale x 4 x double> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_f64m4(vfloat64m4_t vs2, size_t vl) {
+  __riscv_sf_vc_iv_se_f64m4(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i32.nxv8f64.i32.i32(i32 3, i32 31, <vscale x 8 x double> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.i64.nxv8f64.i64.i64(i64 3, i64 31, <vscale x 8 x double> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_f64m8(vfloat64m8_t vs2, size_t vl) {
+  __riscv_sf_vc_iv_se_f64m8(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.iv.se.nxv1f16.i32.i32.i32(i32 3, <vscale x 1 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.iv.se.nxv1f16.i64.i64.i64(i64 3, <vscale x 1 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vc_v_iv_se_f16mf4(vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_se_f16mf4(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.iv.se.nxv2f16.i32.i32.i32(i32 3, <vscale x 2 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.iv.se.nxv2f16.i64.i64.i64(i64 3, <vscale x 2 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vc_v_iv_se_f16mf2(vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_se_f16mf2(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.iv.se.nxv4f16.i32.i32.i32(i32 3, <vscale x 4 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.iv.se.nxv4f16.i64.i64.i64(i64 3, <vscale x 4 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vc_v_iv_se_f16m1(vfloat16m1_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_se_f16m1(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.iv.se.nxv8f16.i32.i32.i32(i32 3, <vscale x 8 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.iv.se.nxv8f16.i64.i64.i64(i64 3, <vscale x 8 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vc_v_iv_se_f16m2(vfloat16m2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_se_f16m2(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.iv.se.nxv16f16.i32.i32.i32(i32 3, <vscale x 16 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.iv.se.nxv16f16.i64.i64.i64(i64 3, <vscale x 16 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vc_v_iv_se_f16m4(vfloat16m4_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_se_f16m4(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.iv.se.nxv32f16.i32.i32.i32(i32 3, <vscale x 32 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.iv.se.nxv32f16.i64.i64.i64(i64 3, <vscale x 32 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vc_v_iv_se_f16m8(vfloat16m8_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_se_f16m8(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.iv.se.nxv1f32.i32.i32.i32(i32 3, <vscale x 1 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.iv.se.nxv1f32.i64.i64.i64(i64 3, <vscale x 1 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_iv_se_f32mf2(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_se_f32mf2(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.iv.se.nxv2f32.i32.i32.i32(i32 3, <vscale x 2 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.iv.se.nxv2f32.i64.i64.i64(i64 3, <vscale x 2 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_iv_se_f32m1(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_se_f32m1(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.iv.se.nxv4f32.i32.i32.i32(i32 3, <vscale x 4 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.iv.se.nxv4f32.i64.i64.i64(i64 3, <vscale x 4 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_iv_se_f32m2(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_se_f32m2(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.iv.se.nxv8f32.i32.i32.i32(i32 3, <vscale x 8 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.iv.se.nxv8f32.i64.i64.i64(i64 3, <vscale x 8 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_iv_se_f32m4(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_se_f32m4(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.iv.se.nxv16f32.i32.i32.i32(i32 3, <vscale x 16 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.iv.se.nxv16f32.i64.i64.i64(i64 3, <vscale x 16 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_iv_se_f32m8(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_se_f32m8(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.iv.se.nxv1f64.i32.i32.i32(i32 3, <vscale x 1 x double> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.iv.se.nxv1f64.i64.i64.i64(i64 3, <vscale x 1 x double> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_iv_se_f64m1(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_se_f64m1(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.iv.se.nxv2f64.i32.i32.i32(i32 3, <vscale x 2 x double> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.iv.se.nxv2f64.i64.i64.i64(i64 3, <vscale x 2 x double> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_iv_se_f64m2(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_se_f64m2(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.iv.se.nxv4f64.i32.i32.i32(i32 3, <vscale x 4 x double> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.iv.se.nxv4f64.i64.i64.i64(i64 3, <vscale x 4 x double> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_iv_se_f64m4(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_se_f64m4(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.iv.se.nxv8f64.i32.i32.i32(i32 3, <vscale x 8 x double> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.iv.se.nxv8f64.i64.i64.i64(i64 3, <vscale x 8 x double> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_iv_se_f64m8(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_se_f64m8(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.iv.nxv1f16.i32.i32.i32(i32 3, <vscale x 1 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.iv.nxv1f16.i64.i64.i64(i64 3, <vscale x 1 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vc_v_iv_f16mf4(vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_f16mf4(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.iv.nxv2f16.i32.i32.i32(i32 3, <vscale x 2 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.iv.nxv2f16.i64.i64.i64(i64 3, <vscale x 2 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vc_v_iv_f16mf2(vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_f16mf2(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.iv.nxv4f16.i32.i32.i32(i32 3, <vscale x 4 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.iv.nxv4f16.i64.i64.i64(i64 3, <vscale x 4 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vc_v_iv_f16m1(vfloat16m1_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_f16m1(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.iv.nxv8f16.i32.i32.i32(i32 3, <vscale x 8 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.iv.nxv8f16.i64.i64.i64(i64 3, <vscale x 8 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vc_v_iv_f16m2(vfloat16m2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_f16m2(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.iv.nxv16f16.i32.i32.i32(i32 3, <vscale x 16 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.iv.nxv16f16.i64.i64.i64(i64 3, <vscale x 16 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vc_v_iv_f16m4(vfloat16m4_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_f16m4(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.iv.nxv32f16.i32.i32.i32(i32 3, <vscale x 32 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.iv.nxv32f16.i64.i64.i64(i64 3, <vscale x 32 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vc_v_iv_f16m8(vfloat16m8_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_f16m8(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.iv.nxv1f32.i32.i32.i32(i32 3, <vscale x 1 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.iv.nxv1f32.i64.i64.i64(i64 3, <vscale x 1 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_iv_f32mf2(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_f32mf2(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.iv.nxv2f32.i32.i32.i32(i32 3, <vscale x 2 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.iv.nxv2f32.i64.i64.i64(i64 3, <vscale x 2 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_iv_f32m1(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_f32m1(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.iv.nxv4f32.i32.i32.i32(i32 3, <vscale x 4 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.iv.nxv4f32.i64.i64.i64(i64 3, <vscale x 4 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_iv_f32m2(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_f32m2(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.iv.nxv8f32.i32.i32.i32(i32 3, <vscale x 8 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.iv.nxv8f32.i64.i64.i64(i64 3, <vscale x 8 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_iv_f32m4(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_f32m4(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.iv.nxv16f32.i32.i32.i32(i32 3, <vscale x 16 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.iv.nxv16f32.i64.i64.i64(i64 3, <vscale x 16 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_iv_f32m8(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_f32m8(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.iv.nxv1f64.i32.i32.i32(i32 3, <vscale x 1 x double> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.iv.nxv1f64.i64.i64.i64(i64 3, <vscale x 1 x double> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_iv_f64m1(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_f64m1(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.iv.nxv2f64.i32.i32.i32(i32 3, <vscale x 2 x double> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.iv.nxv2f64.i64.i64.i64(i64 3, <vscale x 2 x double> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_iv_f64m2(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_f64m2(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.iv.nxv4f64.i32.i32.i32(i32 3, <vscale x 4 x double> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.iv.nxv4f64.i64.i64.i64(i64 3, <vscale x 4 x double> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_iv_f64m4(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_f64m4(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.iv.nxv8f64.i32.i32.i32(i32 3, <vscale x 8 x double> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.iv.nxv8f64.i64.i64.i64(i64 3, <vscale x 8 x double> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_iv_f64m8(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_iv_f64m8(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i32.nxv1f16.f16.i32(i32 1, i32 31, <vscale x 1 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i64.nxv1f16.f16.i64(i64 1, i64 31, <vscale x 1 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_f16mf4(vfloat16mf4_t vs2, _Float16 fs1, size_t vl) {
+  __riscv_sf_vc_fv_se_f16mf4(p26, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i32.nxv2f16.f16.i32(i32 1, i32 31, <vscale x 2 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i64.nxv2f16.f16.i64(i64 1, i64 31, <vscale x 2 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_f16mf2(vfloat16mf2_t vs2, _Float16 fs1, size_t vl) {
+  __riscv_sf_vc_fv_se_f16mf2(p26, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i32.nxv4f16.f16.i32(i32 1, i32 31, <vscale x 4 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i64.nxv4f16.f16.i64(i64 1, i64 31, <vscale x 4 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_f16m1(vfloat16m1_t vs2, _Float16 fs1, size_t vl) {
+  __riscv_sf_vc_fv_se_f16m1(p26, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i32.nxv8f16.f16.i32(i32 1, i32 31, <vscale x 8 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i64.nxv8f16.f16.i64(i64 1, i64 31, <vscale x 8 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_f16m2(vfloat16m2_t vs2, _Float16 fs1, size_t vl) {
+  __riscv_sf_vc_fv_se_f16m2(p26, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i32.nxv16f16.f16.i32(i32 1, i32 31, <vscale x 16 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i64.nxv16f16.f16.i64(i64 1, i64 31, <vscale x 16 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_f16m4(vfloat16m4_t vs2, _Float16 fs1, size_t vl) {
+  __riscv_sf_vc_fv_se_f16m4(p26, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i32.nxv32f16.f16.i32(i32 1, i32 31, <vscale x 32 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i64.nxv32f16.f16.i64(i64 1, i64 31, <vscale x 32 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_f16m8(vfloat16m8_t vs2, _Float16 fs1, size_t vl) {
+  __riscv_sf_vc_fv_se_f16m8(p26, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i32.nxv1f32.f32.i32(i32 1, i32 31, <vscale x 1 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i64.nxv1f32.f32.i64(i64 1, i64 31, <vscale x 1 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_f32mf2(vfloat32mf2_t vs2, float fs1, size_t vl) {
+  __riscv_sf_vc_fv_se_f32mf2(p26, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i32.nxv2f32.f32.i32(i32 1, i32 31, <vscale x 2 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i64.nxv2f32.f32.i64(i64 1, i64 31, <vscale x 2 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_f32m1(vfloat32m1_t vs2, float fs1, size_t vl) {
+  __riscv_sf_vc_fv_se_f32m1(p26, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i32.nxv4f32.f32.i32(i32 1, i32 31, <vscale x 4 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i64.nxv4f32.f32.i64(i64 1, i64 31, <vscale x 4 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_f32m2(vfloat32m2_t vs2, float fs1, size_t vl) {
+  __riscv_sf_vc_fv_se_f32m2(p26, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i32.nxv8f32.f32.i32(i32 1, i32 31, <vscale x 8 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i64.nxv8f32.f32.i64(i64 1, i64 31, <vscale x 8 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_f32m4(vfloat32m4_t vs2, float fs1, size_t vl) {
+  __riscv_sf_vc_fv_se_f32m4(p26, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i32.nxv16f32.f32.i32(i32 1, i32 31, <vscale x 16 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i64.nxv16f32.f32.i64(i64 1, i64 31, <vscale x 16 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_f32m8(vfloat32m8_t vs2, float fs1, size_t vl) {
+  __riscv_sf_vc_fv_se_f32m8(p26, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i32.nxv1f64.f64.i32(i32 1, i32 31, <vscale x 1 x double> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i64.nxv1f64.f64.i64(i64 1, i64 31, <vscale x 1 x double> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_f64m1(vfloat64m1_t vs2, double fs1, size_t vl) {
+  __riscv_sf_vc_fv_se_f64m1(p26, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i32.nxv2f64.f64.i32(i32 1, i32 31, <vscale x 2 x double> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i64.nxv2f64.f64.i64(i64 1, i64 31, <vscale x 2 x double> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_f64m2(vfloat64m2_t vs2, double fs1, size_t vl) {
+  __riscv_sf_vc_fv_se_f64m2(p26, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i32.nxv4f64.f64.i32(i32 1, i32 31, <vscale x 4 x double> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i64.nxv4f64.f64.i64(i64 1, i64 31, <vscale x 4 x double> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_f64m4(vfloat64m4_t vs2, double fs1, size_t vl) {
+  __riscv_sf_vc_fv_se_f64m4(p26, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i32.nxv8f64.f64.i32(i32 1, i32 31, <vscale x 8 x double> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.i64.nxv8f64.f64.i64(i64 1, i64 31, <vscale x 8 x double> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_f64m8(vfloat64m8_t vs2, double fs1, size_t vl) {
+  __riscv_sf_vc_fv_se_f64m8(p26, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.fv.se.nxv1f16.i32.f16.i32(i32 1, <vscale x 1 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.fv.se.nxv1f16.i64.f16.i64(i64 1, <vscale x 1 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vc_v_fv_se_f16mf4(vfloat16mf4_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_se_f16mf4(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.fv.se.nxv2f16.i32.f16.i32(i32 1, <vscale x 2 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.fv.se.nxv2f16.i64.f16.i64(i64 1, <vscale x 2 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vc_v_fv_se_f16mf2(vfloat16mf2_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_se_f16mf2(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fv.se.nxv4f16.i32.f16.i32(i32 1, <vscale x 4 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fv.se.nxv4f16.i64.f16.i64(i64 1, <vscale x 4 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vc_v_fv_se_f16m1(vfloat16m1_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_se_f16m1(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.fv.se.nxv8f16.i32.f16.i32(i32 1, <vscale x 8 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.fv.se.nxv8f16.i64.f16.i64(i64 1, <vscale x 8 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vc_v_fv_se_f16m2(vfloat16m2_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_se_f16m2(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.fv.se.nxv16f16.i32.f16.i32(i32 1, <vscale x 16 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.fv.se.nxv16f16.i64.f16.i64(i64 1, <vscale x 16 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vc_v_fv_se_f16m4(vfloat16m4_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_se_f16m4(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.fv.se.nxv32f16.i32.f16.i32(i32 1, <vscale x 32 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.fv.se.nxv32f16.i64.f16.i64(i64 1, <vscale x 32 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vc_v_fv_se_f16m8(vfloat16m8_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_se_f16m8(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fv.se.nxv1f32.i32.f32.i32(i32 1, <vscale x 1 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fv.se.nxv1f32.i64.f32.i64(i64 1, <vscale x 1 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_fv_se_f32mf2(vfloat32mf2_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_se_f32mf2(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.se.nxv2f32.i32.f32.i32(i32 1, <vscale x 2 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.se.nxv2f32.i64.f32.i64(i64 1, <vscale x 2 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_fv_se_f32m1(vfloat32m1_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_se_f32m1(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.se.nxv4f32.i32.f32.i32(i32 1, <vscale x 4 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.se.nxv4f32.i64.f32.i64(i64 1, <vscale x 4 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_fv_se_f32m2(vfloat32m2_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_se_f32m2(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fv.se.nxv8f32.i32.f32.i32(i32 1, <vscale x 8 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fv.se.nxv8f32.i64.f32.i64(i64 1, <vscale x 8 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_fv_se_f32m4(vfloat32m4_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_se_f32m4(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fv.se.nxv16f32.i32.f32.i32(i32 1, <vscale x 16 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fv.se.nxv16f32.i64.f32.i64(i64 1, <vscale x 16 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_fv_se_f32m8(vfloat32m8_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_se_f32m8(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fv.se.nxv1f64.i32.f64.i32(i32 1, <vscale x 1 x double> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fv.se.nxv1f64.i64.f64.i64(i64 1, <vscale x 1 x double> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_fv_se_f64m1(vfloat64m1_t vs2, double fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_se_f64m1(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fv.se.nxv2f64.i32.f64.i32(i32 1, <vscale x 2 x double> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fv.se.nxv2f64.i64.f64.i64(i64 1, <vscale x 2 x double> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_fv_se_f64m2(vfloat64m2_t vs2, double fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_se_f64m2(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fv.se.nxv4f64.i32.f64.i32(i32 1, <vscale x 4 x double> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fv.se.nxv4f64.i64.f64.i64(i64 1, <vscale x 4 x double> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_fv_se_f64m4(vfloat64m4_t vs2, double fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_se_f64m4(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fv.se.nxv8f64.i32.f64.i32(i32 1, <vscale x 8 x double> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fv.se.nxv8f64.i64.f64.i64(i64 1, <vscale x 8 x double> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_fv_se_f64m8(vfloat64m8_t vs2, double fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_se_f64m8(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.fv.nxv1f16.i32.f16.i32(i32 1, <vscale x 1 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.fv.nxv1f16.i64.f16.i64(i64 1, <vscale x 1 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vc_v_fv_f16mf4(vfloat16mf4_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_f16mf4(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.fv.nxv2f16.i32.f16.i32(i32 1, <vscale x 2 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.fv.nxv2f16.i64.f16.i64(i64 1, <vscale x 2 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vc_v_fv_f16mf2(vfloat16mf2_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_f16mf2(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fv.nxv4f16.i32.f16.i32(i32 1, <vscale x 4 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fv.nxv4f16.i64.f16.i64(i64 1, <vscale x 4 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vc_v_fv_f16m1(vfloat16m1_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_f16m1(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.fv.nxv8f16.i32.f16.i32(i32 1, <vscale x 8 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.fv.nxv8f16.i64.f16.i64(i64 1, <vscale x 8 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vc_v_fv_f16m2(vfloat16m2_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_f16m2(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.fv.nxv16f16.i32.f16.i32(i32 1, <vscale x 16 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.fv.nxv16f16.i64.f16.i64(i64 1, <vscale x 16 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vc_v_fv_f16m4(vfloat16m4_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_f16m4(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.fv.nxv32f16.i32.f16.i32(i32 1, <vscale x 32 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.fv.nxv32f16.i64.f16.i64(i64 1, <vscale x 32 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vc_v_fv_f16m8(vfloat16m8_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_f16m8(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fv.nxv1f32.i32.f32.i32(i32 1, <vscale x 1 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fv.nxv1f32.i64.f32.i64(i64 1, <vscale x 1 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_fv_f32mf2(vfloat32mf2_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_f32mf2(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.nxv2f32.i32.f32.i32(i32 1, <vscale x 2 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.nxv2f32.i64.f32.i64(i64 1, <vscale x 2 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_fv_f32m1(vfloat32m1_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_f32m1(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.nxv4f32.i32.f32.i32(i32 1, <vscale x 4 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.nxv4f32.i64.f32.i64(i64 1, <vscale x 4 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_fv_f32m2(vfloat32m2_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_f32m2(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fv.nxv8f32.i32.f32.i32(i32 1, <vscale x 8 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fv.nxv8f32.i64.f32.i64(i64 1, <vscale x 8 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_fv_f32m4(vfloat32m4_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_f32m4(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fv.nxv16f32.i32.f32.i32(i32 1, <vscale x 16 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fv.nxv16f32.i64.f32.i64(i64 1, <vscale x 16 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_fv_f32m8(vfloat32m8_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_f32m8(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fv.nxv1f64.i32.f64.i32(i32 1, <vscale x 1 x double> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fv.nxv1f64.i64.f64.i64(i64 1, <vscale x 1 x double> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_fv_f64m1(vfloat64m1_t vs2, double fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_f64m1(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fv.nxv2f64.i32.f64.i32(i32 1, <vscale x 2 x double> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fv.nxv2f64.i64.f64.i64(i64 1, <vscale x 2 x double> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_fv_f64m2(vfloat64m2_t vs2, double fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_f64m2(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fv.nxv4f64.i32.f64.i32(i32 1, <vscale x 4 x double> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fv.nxv4f64.i64.f64.i64(i64 1, <vscale x 4 x double> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_fv_f64m4(vfloat64m4_t vs2, double fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_f64m4(p26, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fv.nxv8f64.i32.f64.i32(i32 1, <vscale x 8 x double> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fv.nxv8f64.i64.f64.i64(i64 1, <vscale x 8 x double> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_fv_f64m8(vfloat64m8_t vs2, double fs1, size_t vl) {
+  return __riscv_sf_vc_v_fv_f64m8(p26, vs2, fs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv-rv64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv-rv64.c
index 8b0c73776948e60..29c874ae5285150 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv-rv64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv-rv64.c
@@ -1,6 +1,6 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <sifive_vector.h>
 
@@ -113,3 +113,111 @@ vuint64m4_t test_sf_vc_v_xvv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1
 vuint64m8_t test_sf_vc_v_xvv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
   return __riscv_sf_vc_v_xvv_u64m8(p27_26, vd, vs2, rs1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv1f64.i64.i64(i64 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_f64m1(vfloat64m1_t vd, vfloat64m1_t vs2, uint64_t rs1, size_t vl) {
+  __riscv_sf_vc_xvv_se_f64m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv2f64.i64.i64(i64 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_f64m2(vfloat64m2_t vd, vfloat64m2_t vs2, uint64_t rs1, size_t vl) {
+  __riscv_sf_vc_xvv_se_f64m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv4f64.i64.i64(i64 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_f64m4(vfloat64m4_t vd, vfloat64m4_t vs2, uint64_t rs1, size_t vl) {
+  __riscv_sf_vc_xvv_se_f64m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv8f64.i64.i64(i64 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_f64m8(vfloat64m8_t vd, vfloat64m8_t vs2, uint64_t rs1, size_t vl) {
+  __riscv_sf_vc_xvv_se_f64m8(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.xvv.se.nxv1f64.i64.i64.i64(i64 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_xvv_se_f64m1(vfloat64m1_t vd, vfloat64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_se_f64m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.xvv.se.nxv2f64.i64.i64.i64(i64 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_xvv_se_f64m2(vfloat64m2_t vd, vfloat64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_se_f64m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.xvv.se.nxv4f64.i64.i64.i64(i64 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_xvv_se_f64m4(vfloat64m4_t vd, vfloat64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_se_f64m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.xvv.se.nxv8f64.i64.i64.i64(i64 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_xvv_se_f64m8(vfloat64m8_t vd, vfloat64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_se_f64m8(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.xvv.nxv1f64.i64.i64.i64(i64 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_xvv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_f64m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.xvv.nxv2f64.i64.i64.i64(i64 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_xvv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_f64m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.xvv.nxv4f64.i64.i64.i64(i64 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_xvv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_f64m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.xvv.nxv8f64.i64.i64.i64(i64 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_xvv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_f64m8(p27_26, vd, vs2, rs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv.c
index 4efd7da81bac450..8169c8d6ae295ab 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv.c
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv32 -target-feature +v -target-feature +zfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv32 -target-feature +v -target-feature +zvfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <sifive_vector.h>
 
@@ -3242,3 +3242,2355 @@ vuint64m4_t test_sf_vc_v_fvv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, double fs1,
 vuint64m8_t test_sf_vc_v_fvv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, double fs1, size_t vl) {
   return __riscv_sf_vc_v_fvv_u64m8(p26, vd, vs2, fs1, vl);
 }
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1f16.nxv1f16.i32(i32 3, <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv1f16.nxv1f16.i64(i64 3, <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  __riscv_sf_vc_vvv_se_f16mf4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2f16.nxv2f16.i32(i32 3, <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv2f16.nxv2f16.i64(i64 3, <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  __riscv_sf_vc_vvv_se_f16mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4f16.nxv4f16.i32(i32 3, <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv4f16.nxv4f16.i64(i64 3, <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_f16m1(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  __riscv_sf_vc_vvv_se_f16m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8f16.nxv8f16.i32(i32 3, <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv8f16.nxv8f16.i64(i64 3, <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_f16m2(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  __riscv_sf_vc_vvv_se_f16m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv16f16.nxv16f16.i32(i32 3, <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv16f16.nxv16f16.i64(i64 3, <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_f16m4(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  __riscv_sf_vc_vvv_se_f16m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv32f16.nxv32f16.i32(i32 3, <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv32f16.nxv32f16.i64(i64 3, <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_f16m8(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  __riscv_sf_vc_vvv_se_f16m8(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1f32.nxv1f32.i32(i32 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv1f32.nxv1f32.i64(i64 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  __riscv_sf_vc_vvv_se_f32mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2f32.nxv2f32.i32(i32 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv2f32.nxv2f32.i64(i64 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_f32m1(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  __riscv_sf_vc_vvv_se_f32m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4f32.nxv4f32.i32(i32 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv4f32.nxv4f32.i64(i64 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_f32m2(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  __riscv_sf_vc_vvv_se_f32m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8f32.nxv8f32.i32(i32 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv8f32.nxv8f32.i64(i64 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_f32m4(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  __riscv_sf_vc_vvv_se_f32m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv16f32.nxv16f32.i32(i32 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv16f32.nxv16f32.i64(i64 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_f32m8(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  __riscv_sf_vc_vvv_se_f32m8(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1f64.nxv1f64.i32(i32 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv1f64.nxv1f64.i64(i64 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_f64m1(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  __riscv_sf_vc_vvv_se_f64m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2f64.nxv2f64.i32(i32 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv2f64.nxv2f64.i64(i64 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_f64m2(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  __riscv_sf_vc_vvv_se_f64m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4f64.nxv4f64.i32(i32 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv4f64.nxv4f64.i64(i64 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_f64m4(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  __riscv_sf_vc_vvv_se_f64m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8f64.nxv8f64.i32(i32 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv8f64.nxv8f64.i64(i64 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_f64m8(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  __riscv_sf_vc_vvv_se_f64m8(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv1f16.i32.nxv1f16.i32(i32 3, <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv1f16.i64.nxv1f16.i64(i64 3, <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vc_v_vvv_se_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_se_f16mf4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv2f16.i32.nxv2f16.i32(i32 3, <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv2f16.i64.nxv2f16.i64(i64 3, <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vc_v_vvv_se_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_se_f16mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv4f16.i32.nxv4f16.i32(i32 3, <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv4f16.i64.nxv4f16.i64(i64 3, <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vc_v_vvv_se_f16m1(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_se_f16m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv8f16.i32.nxv8f16.i32(i32 3, <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv8f16.i64.nxv8f16.i64(i64 3, <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vc_v_vvv_se_f16m2(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_se_f16m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv16f16.i32.nxv16f16.i32(i32 3, <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv16f16.i64.nxv16f16.i64(i64 3, <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vc_v_vvv_se_f16m4(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_se_f16m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv32f16.i32.nxv32f16.i32(i32 3, <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv32f16.i64.nxv32f16.i64(i64 3, <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vc_v_vvv_se_f16m8(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_se_f16m8(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv1f32.i32.nxv1f32.i32(i32 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv1f32.i64.nxv1f32.i64(i64 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_vvv_se_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_se_f32mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv2f32.i32.nxv2f32.i32(i32 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv2f32.i64.nxv2f32.i64(i64 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_vvv_se_f32m1(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_se_f32m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv4f32.i32.nxv4f32.i32(i32 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv4f32.i64.nxv4f32.i64(i64 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_vvv_se_f32m2(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_se_f32m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv8f32.i32.nxv8f32.i32(i32 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv8f32.i64.nxv8f32.i64(i64 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_vvv_se_f32m4(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_se_f32m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv16f32.i32.nxv16f32.i32(i32 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv16f32.i64.nxv16f32.i64(i64 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_vvv_se_f32m8(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_se_f32m8(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv1f64.i32.nxv1f64.i32(i32 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv1f64.i64.nxv1f64.i64(i64 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_vvv_se_f64m1(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_se_f64m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv2f64.i32.nxv2f64.i32(i32 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv2f64.i64.nxv2f64.i64(i64 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_vvv_se_f64m2(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_se_f64m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv4f64.i32.nxv4f64.i32(i32 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv4f64.i64.nxv4f64.i64(i64 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_vvv_se_f64m4(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_se_f64m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv8f64.i32.nxv8f64.i32(i32 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv8f64.i64.nxv8f64.i64(i64 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_vvv_se_f64m8(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_se_f64m8(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.nxv1f16.i32.nxv1f16.i32(i32 3, <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.nxv1f16.i64.nxv1f16.i64(i64 3, <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vc_v_vvv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_f16mf4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.nxv2f16.i32.nxv2f16.i32(i32 3, <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.nxv2f16.i64.nxv2f16.i64(i64 3, <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vc_v_vvv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_f16mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.nxv4f16.i32.nxv4f16.i32(i32 3, <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.nxv4f16.i64.nxv4f16.i64(i64 3, <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vc_v_vvv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_f16m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.nxv8f16.i32.nxv8f16.i32(i32 3, <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.nxv8f16.i64.nxv8f16.i64(i64 3, <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vc_v_vvv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_f16m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.nxv16f16.i32.nxv16f16.i32(i32 3, <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.nxv16f16.i64.nxv16f16.i64(i64 3, <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vc_v_vvv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_f16m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.nxv32f16.i32.nxv32f16.i32(i32 3, <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.nxv32f16.i64.nxv32f16.i64(i64 3, <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vc_v_vvv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_f16m8(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.nxv1f32.i32.nxv1f32.i32(i32 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.nxv1f32.i64.nxv1f32.i64(i64 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_vvv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_f32mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.nxv2f32.i32.nxv2f32.i32(i32 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.nxv2f32.i64.nxv2f32.i64(i64 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_vvv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_f32m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.nxv4f32.i32.nxv4f32.i32(i32 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.nxv4f32.i64.nxv4f32.i64(i64 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_vvv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_f32m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.nxv8f32.i32.nxv8f32.i32(i32 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.nxv8f32.i64.nxv8f32.i64(i64 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_vvv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_f32m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.nxv16f32.i32.nxv16f32.i32(i32 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.nxv16f32.i64.nxv16f32.i64(i64 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_vvv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_f32m8(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.nxv1f64.i32.nxv1f64.i32(i32 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.nxv1f64.i64.nxv1f64.i64(i64 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_vvv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_f64m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.nxv2f64.i32.nxv2f64.i32(i32 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.nxv2f64.i64.nxv2f64.i64(i64 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_vvv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_f64m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.nxv4f64.i32.nxv4f64.i32(i32 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.nxv4f64.i64.nxv4f64.i64(i64 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_vvv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_f64m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvv.nxv8f64.i32.nxv8f64.i32(i32 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvv.nxv8f64.i64.nxv8f64.i64(i64 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_vvv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvv_f64m8(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1f16.i16.i32(i32 3, <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv1f16.i16.i64(i64 3, <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs2, uint16_t rs1, size_t vl) {
+  __riscv_sf_vc_xvv_se_f16mf4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv2f16.i16.i32(i32 3, <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv2f16.i16.i64(i64 3, <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs2, uint16_t rs1, size_t vl) {
+  __riscv_sf_vc_xvv_se_f16mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4f16.i16.i32(i32 3, <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv4f16.i16.i64(i64 3, <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_f16m1(vfloat16m1_t vd, vfloat16m1_t vs2, uint16_t rs1, size_t vl) {
+  __riscv_sf_vc_xvv_se_f16m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8f16.i16.i32(i32 3, <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv8f16.i16.i64(i64 3, <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_f16m2(vfloat16m2_t vd, vfloat16m2_t vs2, uint16_t rs1, size_t vl) {
+  __riscv_sf_vc_xvv_se_f16m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv16f16.i16.i32(i32 3, <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv16f16.i16.i64(i64 3, <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_f16m4(vfloat16m4_t vd, vfloat16m4_t vs2, uint16_t rs1, size_t vl) {
+  __riscv_sf_vc_xvv_se_f16m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv32f16.i16.i32(i32 3, <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv32f16.i16.i64(i64 3, <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_f16m8(vfloat16m8_t vd, vfloat16m8_t vs2, uint16_t rs1, size_t vl) {
+  __riscv_sf_vc_xvv_se_f16m8(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1f32.i32.i32(i32 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv1f32.i32.i64(i64 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs2, uint32_t rs1, size_t vl) {
+  __riscv_sf_vc_xvv_se_f32mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv2f32.i32.i32(i32 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv2f32.i32.i64(i64 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_f32m1(vfloat32m1_t vd, vfloat32m1_t vs2, uint32_t rs1, size_t vl) {
+  __riscv_sf_vc_xvv_se_f32m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4f32.i32.i32(i32 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv4f32.i32.i64(i64 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_f32m2(vfloat32m2_t vd, vfloat32m2_t vs2, uint32_t rs1, size_t vl) {
+  __riscv_sf_vc_xvv_se_f32m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8f32.i32.i32(i32 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv8f32.i32.i64(i64 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_f32m4(vfloat32m4_t vd, vfloat32m4_t vs2, uint32_t rs1, size_t vl) {
+  __riscv_sf_vc_xvv_se_f32m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv16f32.i32.i32(i32 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv16f32.i32.i64(i64 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_f32m8(vfloat32m8_t vd, vfloat32m8_t vs2, uint32_t rs1, size_t vl) {
+  __riscv_sf_vc_xvv_se_f32m8(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv1f16.i32.i16.i32(i32 3, <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv1f16.i64.i16.i64(i64 3, <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vc_v_xvv_se_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_se_f16mf4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv2f16.i32.i16.i32(i32 3, <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv2f16.i64.i16.i64(i64 3, <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vc_v_xvv_se_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_se_f16mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv4f16.i32.i16.i32(i32 3, <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv4f16.i64.i16.i64(i64 3, <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vc_v_xvv_se_f16m1(vfloat16m1_t vd, vfloat16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_se_f16m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv8f16.i32.i16.i32(i32 3, <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv8f16.i64.i16.i64(i64 3, <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vc_v_xvv_se_f16m2(vfloat16m2_t vd, vfloat16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_se_f16m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv16f16.i32.i16.i32(i32 3, <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv16f16.i64.i16.i64(i64 3, <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vc_v_xvv_se_f16m4(vfloat16m4_t vd, vfloat16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_se_f16m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv32f16.i32.i16.i32(i32 3, <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv32f16.i64.i16.i64(i64 3, <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vc_v_xvv_se_f16m8(vfloat16m8_t vd, vfloat16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_se_f16m8(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv1f32.i32.i32.i32(i32 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv1f32.i64.i32.i64(i64 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_xvv_se_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_se_f32mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv2f32.i32.i32.i32(i32 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv2f32.i64.i32.i64(i64 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_xvv_se_f32m1(vfloat32m1_t vd, vfloat32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_se_f32m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.i32.i32.i32(i32 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.i64.i32.i64(i64 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_xvv_se_f32m2(vfloat32m2_t vd, vfloat32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_se_f32m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv8f32.i32.i32.i32(i32 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv8f32.i64.i32.i64(i64 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_xvv_se_f32m4(vfloat32m4_t vd, vfloat32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_se_f32m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv16f32.i32.i32.i32(i32 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv16f32.i64.i32.i64(i64 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_xvv_se_f32m8(vfloat32m8_t vd, vfloat32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_se_f32m8(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.xvv.nxv1f16.i32.i16.i32(i32 3, <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.xvv.nxv1f16.i64.i16.i64(i64 3, <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vc_v_xvv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_f16mf4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.xvv.nxv2f16.i32.i16.i32(i32 3, <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.xvv.nxv2f16.i64.i16.i64(i64 3, <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vc_v_xvv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_f16mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.xvv.nxv4f16.i32.i16.i32(i32 3, <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.xvv.nxv4f16.i64.i16.i64(i64 3, <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vc_v_xvv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_f16m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.xvv.nxv8f16.i32.i16.i32(i32 3, <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.xvv.nxv8f16.i64.i16.i64(i64 3, <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vc_v_xvv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_f16m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.xvv.nxv16f16.i32.i16.i32(i32 3, <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.xvv.nxv16f16.i64.i16.i64(i64 3, <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vc_v_xvv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_f16m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.xvv.nxv32f16.i32.i16.i32(i32 3, <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.xvv.nxv32f16.i64.i16.i64(i64 3, <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vc_v_xvv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_f16m8(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvv.nxv1f32.i32.i32.i32(i32 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvv.nxv1f32.i64.i32.i64(i64 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_xvv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_f32mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvv.nxv2f32.i32.i32.i32(i32 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvv.nxv2f32.i64.i32.i64(i64 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_xvv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_f32m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.nxv4f32.i32.i32.i32(i32 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.nxv4f32.i64.i32.i64(i64 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_xvv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_f32m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvv.nxv8f32.i32.i32.i32(i32 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvv.nxv8f32.i64.i32.i64(i64 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_xvv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_f32m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvv.nxv16f32.i32.i32.i32(i32 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvv.nxv16f32.i64.i32.i64(i64 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_xvv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvv_f32m8(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1f16.i32.i32(i32 3, <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv1f16.i64.i64(i64 3, <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  __riscv_sf_vc_ivv_se_f16mf4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2f16.i32.i32(i32 3, <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv2f16.i64.i64(i64 3, <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  __riscv_sf_vc_ivv_se_f16mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4f16.i32.i32(i32 3, <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv4f16.i64.i64(i64 3, <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_f16m1(vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  __riscv_sf_vc_ivv_se_f16m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8f16.i32.i32(i32 3, <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv8f16.i64.i64(i64 3, <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_f16m2(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  __riscv_sf_vc_ivv_se_f16m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv16f16.i32.i32(i32 3, <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv16f16.i64.i64(i64 3, <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_f16m4(vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  __riscv_sf_vc_ivv_se_f16m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv32f16.i32.i32(i32 3, <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv32f16.i64.i64(i64 3, <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_f16m8(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  __riscv_sf_vc_ivv_se_f16m8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1f32.i32.i32(i32 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv1f32.i64.i64(i64 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  __riscv_sf_vc_ivv_se_f32mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2f32.i32.i32(i32 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv2f32.i64.i64(i64 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_f32m1(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  __riscv_sf_vc_ivv_se_f32m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4f32.i32.i32(i32 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv4f32.i64.i64(i64 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_f32m2(vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  __riscv_sf_vc_ivv_se_f32m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8f32.i32.i32(i32 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv8f32.i64.i64(i64 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_f32m4(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  __riscv_sf_vc_ivv_se_f32m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv16f32.i32.i32(i32 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv16f32.i64.i64(i64 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_f32m8(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  __riscv_sf_vc_ivv_se_f32m8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1f64.i32.i32(i32 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv1f64.i64.i64(i64 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_f64m1(vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  __riscv_sf_vc_ivv_se_f64m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2f64.i32.i32(i32 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv2f64.i64.i64(i64 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_f64m2(vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  __riscv_sf_vc_ivv_se_f64m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4f64.i32.i32(i32 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv4f64.i64.i64(i64 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_f64m4(vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  __riscv_sf_vc_ivv_se_f64m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8f64.i32.i32(i32 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv8f64.i64.i64(i64 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_f64m8(vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  __riscv_sf_vc_ivv_se_f64m8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv1f16.i32.i32.i32(i32 3, <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv1f16.i64.i64.i64(i64 3, <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vc_v_ivv_se_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_se_f16mf4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv2f16.i32.i32.i32(i32 3, <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv2f16.i64.i64.i64(i64 3, <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vc_v_ivv_se_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_se_f16mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv4f16.i32.i32.i32(i32 3, <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv4f16.i64.i64.i64(i64 3, <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vc_v_ivv_se_f16m1(vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_se_f16m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv8f16.i32.i32.i32(i32 3, <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv8f16.i64.i64.i64(i64 3, <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vc_v_ivv_se_f16m2(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_se_f16m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv16f16.i32.i32.i32(i32 3, <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv16f16.i64.i64.i64(i64 3, <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vc_v_ivv_se_f16m4(vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_se_f16m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv32f16.i32.i32.i32(i32 3, <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv32f16.i64.i64.i64(i64 3, <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vc_v_ivv_se_f16m8(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_se_f16m8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv1f32.i32.i32.i32(i32 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv1f32.i64.i64.i64(i64 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_ivv_se_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_se_f32mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv2f32.i32.i32.i32(i32 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv2f32.i64.i64.i64(i64 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_ivv_se_f32m1(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_se_f32m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv4f32.i32.i32.i32(i32 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv4f32.i64.i64.i64(i64 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_ivv_se_f32m2(vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_se_f32m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv8f32.i32.i32.i32(i32 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv8f32.i64.i64.i64(i64 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_ivv_se_f32m4(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_se_f32m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv16f32.i32.i32.i32(i32 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv16f32.i64.i64.i64(i64 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_ivv_se_f32m8(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_se_f32m8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv1f64.i32.i32.i32(i32 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv1f64.i64.i64.i64(i64 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_ivv_se_f64m1(vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_se_f64m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv2f64.i32.i32.i32(i32 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv2f64.i64.i64.i64(i64 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_ivv_se_f64m2(vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_se_f64m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv4f64.i32.i32.i32(i32 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv4f64.i64.i64.i64(i64 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_ivv_se_f64m4(vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_se_f64m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv8f64.i32.i32.i32(i32 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv8f64.i64.i64.i64(i64 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_ivv_se_f64m8(vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_se_f64m8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.ivv.nxv1f16.i32.i32.i32(i32 3, <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.ivv.nxv1f16.i64.i64.i64(i64 3, <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vc_v_ivv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_f16mf4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.ivv.nxv2f16.i32.i32.i32(i32 3, <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.ivv.nxv2f16.i64.i64.i64(i64 3, <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vc_v_ivv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_f16mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.ivv.nxv4f16.i32.i32.i32(i32 3, <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.ivv.nxv4f16.i64.i64.i64(i64 3, <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vc_v_ivv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_f16m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.ivv.nxv8f16.i32.i32.i32(i32 3, <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.ivv.nxv8f16.i64.i64.i64(i64 3, <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vc_v_ivv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_f16m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.ivv.nxv16f16.i32.i32.i32(i32 3, <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.ivv.nxv16f16.i64.i64.i64(i64 3, <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vc_v_ivv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_f16m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.ivv.nxv32f16.i32.i32.i32(i32 3, <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.ivv.nxv32f16.i64.i64.i64(i64 3, <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vc_v_ivv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_f16m8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivv.nxv1f32.i32.i32.i32(i32 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivv.nxv1f32.i64.i64.i64(i64 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_ivv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_f32mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivv.nxv2f32.i32.i32.i32(i32 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivv.nxv2f32.i64.i64.i64(i64 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_ivv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_f32m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivv.nxv4f32.i32.i32.i32(i32 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivv.nxv4f32.i64.i64.i64(i64 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_ivv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_f32m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivv.nxv8f32.i32.i32.i32(i32 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivv.nxv8f32.i64.i64.i64(i64 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_ivv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_f32m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivv.nxv16f32.i32.i32.i32(i32 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivv.nxv16f32.i64.i64.i64(i64 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_ivv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_f32m8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivv.nxv1f64.i32.i32.i32(i32 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivv.nxv1f64.i64.i64.i64(i64 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_ivv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_f64m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivv.nxv2f64.i32.i32.i32(i32 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivv.nxv2f64.i64.i64.i64(i64 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_ivv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_f64m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivv.nxv4f64.i32.i32.i32(i32 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivv.nxv4f64.i64.i64.i64(i64 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_ivv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_f64m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivv.nxv8f64.i32.i32.i32(i32 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivv.nxv8f64.i64.i64.i64(i64 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_ivv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivv_f64m8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv1f16.f16.i32(i32 1, <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv1f16.f16.i64(i64 1, <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 fs1, size_t vl) {
+  __riscv_sf_vc_fvv_se_f16mf4(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv2f16.f16.i32(i32 1, <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv2f16.f16.i64(i64 1, <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 fs1, size_t vl) {
+  __riscv_sf_vc_fvv_se_f16mf2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv4f16.f16.i32(i32 1, <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv4f16.f16.i64(i64 1, <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_f16m1(vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 fs1, size_t vl) {
+  __riscv_sf_vc_fvv_se_f16m1(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv8f16.f16.i32(i32 1, <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv8f16.f16.i64(i64 1, <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_f16m2(vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 fs1, size_t vl) {
+  __riscv_sf_vc_fvv_se_f16m2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv16f16.f16.i32(i32 1, <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv16f16.f16.i64(i64 1, <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_f16m4(vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 fs1, size_t vl) {
+  __riscv_sf_vc_fvv_se_f16m4(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv32f16.f16.i32(i32 1, <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv32f16.f16.i64(i64 1, <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_f16m8(vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 fs1, size_t vl) {
+  __riscv_sf_vc_fvv_se_f16m8(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv1f32.f32.i32(i32 1, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv1f32.f32.i64(i64 1, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs2, float fs1, size_t vl) {
+  __riscv_sf_vc_fvv_se_f32mf2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv2f32.f32.i32(i32 1, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv2f32.f32.i64(i64 1, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_f32m1(vfloat32m1_t vd, vfloat32m1_t vs2, float fs1, size_t vl) {
+  __riscv_sf_vc_fvv_se_f32m1(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv4f32.f32.i32(i32 1, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv4f32.f32.i64(i64 1, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_f32m2(vfloat32m2_t vd, vfloat32m2_t vs2, float fs1, size_t vl) {
+  __riscv_sf_vc_fvv_se_f32m2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv8f32.f32.i32(i32 1, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv8f32.f32.i64(i64 1, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_f32m4(vfloat32m4_t vd, vfloat32m4_t vs2, float fs1, size_t vl) {
+  __riscv_sf_vc_fvv_se_f32m4(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv16f32.f32.i32(i32 1, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv16f32.f32.i64(i64 1, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_f32m8(vfloat32m8_t vd, vfloat32m8_t vs2, float fs1, size_t vl) {
+  __riscv_sf_vc_fvv_se_f32m8(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv1f64.f64.i32(i32 1, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv1f64.f64.i64(i64 1, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_f64m1(vfloat64m1_t vd, vfloat64m1_t vs2, double fs1, size_t vl) {
+  __riscv_sf_vc_fvv_se_f64m1(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv2f64.f64.i32(i32 1, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv2f64.f64.i64(i64 1, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_f64m2(vfloat64m2_t vd, vfloat64m2_t vs2, double fs1, size_t vl) {
+  __riscv_sf_vc_fvv_se_f64m2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv4f64.f64.i32(i32 1, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv4f64.f64.i64(i64 1, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_f64m4(vfloat64m4_t vd, vfloat64m4_t vs2, double fs1, size_t vl) {
+  __riscv_sf_vc_fvv_se_f64m4(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv8f64.f64.i32(i32 1, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv8f64.f64.i64(i64 1, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_f64m8(vfloat64m8_t vd, vfloat64m8_t vs2, double fs1, size_t vl) {
+  __riscv_sf_vc_fvv_se_f64m8(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv1f16.i32.f16.i32(i32 1, <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv1f16.i64.f16.i64(i64 1, <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vc_v_fvv_se_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_se_f16mf4(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv2f16.i32.f16.i32(i32 1, <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv2f16.i64.f16.i64(i64 1, <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vc_v_fvv_se_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_se_f16mf2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv4f16.i32.f16.i32(i32 1, <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv4f16.i64.f16.i64(i64 1, <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vc_v_fvv_se_f16m1(vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_se_f16m1(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv8f16.i32.f16.i32(i32 1, <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv8f16.i64.f16.i64(i64 1, <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vc_v_fvv_se_f16m2(vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_se_f16m2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv16f16.i32.f16.i32(i32 1, <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv16f16.i64.f16.i64(i64 1, <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vc_v_fvv_se_f16m4(vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_se_f16m4(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv32f16.i32.f16.i32(i32 1, <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv32f16.i64.f16.i64(i64 1, <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vc_v_fvv_se_f16m8(vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_se_f16m8(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv1f32.i32.f32.i32(i32 1, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv1f32.i64.f32.i64(i64 1, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_fvv_se_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_se_f32mf2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv2f32.i32.f32.i32(i32 1, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv2f32.i64.f32.i64(i64 1, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_fvv_se_f32m1(vfloat32m1_t vd, vfloat32m1_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_se_f32m1(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv4f32.i32.f32.i32(i32 1, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv4f32.i64.f32.i64(i64 1, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_fvv_se_f32m2(vfloat32m2_t vd, vfloat32m2_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_se_f32m2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv8f32.i32.f32.i32(i32 1, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv8f32.i64.f32.i64(i64 1, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_fvv_se_f32m4(vfloat32m4_t vd, vfloat32m4_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_se_f32m4(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv16f32.i32.f32.i32(i32 1, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv16f32.i64.f32.i64(i64 1, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_fvv_se_f32m8(vfloat32m8_t vd, vfloat32m8_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_se_f32m8(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv1f64.i32.f64.i32(i32 1, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv1f64.i64.f64.i64(i64 1, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_fvv_se_f64m1(vfloat64m1_t vd, vfloat64m1_t vs2, double fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_se_f64m1(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv2f64.i32.f64.i32(i32 1, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv2f64.i64.f64.i64(i64 1, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_fvv_se_f64m2(vfloat64m2_t vd, vfloat64m2_t vs2, double fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_se_f64m2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv4f64.i32.f64.i32(i32 1, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv4f64.i64.f64.i64(i64 1, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_fvv_se_f64m4(vfloat64m4_t vd, vfloat64m4_t vs2, double fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_se_f64m4(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv8f64.i32.f64.i32(i32 1, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv8f64.i64.f64.i64(i64 1, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_fvv_se_f64m8(vfloat64m8_t vd, vfloat64m8_t vs2, double fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_se_f64m8(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.fvv.nxv1f16.i32.f16.i32(i32 1, <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.sf.vc.v.fvv.nxv1f16.i64.f16.i64(i64 1, <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_sf_vc_v_fvv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_f16mf4(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.fvv.nxv2f16.i32.f16.i32(i32 1, <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.sf.vc.v.fvv.nxv2f16.i64.f16.i64(i64 1, <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_sf_vc_v_fvv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_f16mf2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fvv.nxv4f16.i32.f16.i32(i32 1, <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fvv.nxv4f16.i64.f16.i64(i64 1, <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_sf_vc_v_fvv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_f16m1(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.fvv.nxv8f16.i32.f16.i32(i32 1, <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.sf.vc.v.fvv.nxv8f16.i64.f16.i64(i64 1, <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_sf_vc_v_fvv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_f16m2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.fvv.nxv16f16.i32.f16.i32(i32 1, <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.sf.vc.v.fvv.nxv16f16.i64.f16.i64(i64 1, <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_sf_vc_v_fvv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_f16m4(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_f16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.fvv.nxv32f16.i32.f16.i32(i32 1, <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.sf.vc.v.fvv.nxv32f16.i64.f16.i64(i64 1, <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_sf_vc_v_fvv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_f16m8(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvv.nxv1f32.i32.f32.i32(i32 1, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvv.nxv1f32.i64.f32.i64(i64 1, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_fvv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_f32mf2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvv.nxv2f32.i32.f32.i32(i32 1, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvv.nxv2f32.i64.f32.i64(i64 1, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_fvv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_f32m1(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvv.nxv4f32.i32.f32.i32(i32 1, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvv.nxv4f32.i64.f32.i64(i64 1, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_fvv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_f32m2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvv.nxv8f32.i32.f32.i32(i32 1, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvv.nxv8f32.i64.f32.i64(i64 1, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_fvv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_f32m4(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvv.nxv16f32.i32.f32.i32(i32 1, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvv.nxv16f32.i64.f32.i64(i64 1, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_fvv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_f32m8(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvv.nxv1f64.i32.f64.i32(i32 1, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvv.nxv1f64.i64.f64.i64(i64 1, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_fvv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs2, double fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_f64m1(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvv.nxv2f64.i32.f64.i32(i32 1, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvv.nxv2f64.i64.f64.i64(i64 1, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_fvv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs2, double fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_f64m2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvv.nxv4f64.i32.f64.i32(i32 1, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvv.nxv4f64.i64.f64.i64(i64 1, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_fvv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs2, double fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_f64m4(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvv.nxv8f64.i32.f64.i32(i32 1, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvv.nxv8f64.i64.f64.i64(i64 1, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_fvv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs2, double fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvv_f64m8(p26, vd, vs2, fs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvw.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvw.c
index 23ee2b7bb0f4ed6..9d447fd51122ac1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvw.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvw.c
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv32 -target-feature +v -target-feature +zfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv32 -target-feature +v -target-feature +zvfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <sifive_vector.h>
 
@@ -2276,3 +2276,1515 @@ vuint64m4_t test_sf_vc_v_fvw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, float fs1, s
 vuint64m8_t test_sf_vc_v_fvw_u32m4(vuint64m8_t vd, vuint32m4_t vs2, float fs1, size_t vl) {
   return __riscv_sf_vc_v_fvw_u32m4(p26, vd, vs2, fs1, vl);
 }
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvw_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.i32.nxv1f32.nxv1f16.nxv1f16.i32(i32 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvw_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.i64.nxv1f32.nxv1f16.nxv1f16.i64(i64 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvw_se_f16mf4(vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  __riscv_sf_vc_vvw_se_f16mf4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvw_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.i32.nxv2f32.nxv2f16.nxv2f16.i32(i32 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvw_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.i64.nxv2f32.nxv2f16.nxv2f16.i64(i64 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvw_se_f16mf2(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  __riscv_sf_vc_vvw_se_f16mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvw_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.i32.nxv4f32.nxv4f16.nxv4f16.i32(i32 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvw_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.i64.nxv4f32.nxv4f16.nxv4f16.i64(i64 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvw_se_f16m1(vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  __riscv_sf_vc_vvw_se_f16m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvw_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.i32.nxv8f32.nxv8f16.nxv8f16.i32(i32 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvw_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.i64.nxv8f32.nxv8f16.nxv8f16.i64(i64 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvw_se_f16m2(vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  __riscv_sf_vc_vvw_se_f16m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvw_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.i32.nxv16f32.nxv16f16.nxv16f16.i32(i32 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvw_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.i64.nxv16f32.nxv16f16.nxv16f16.i64(i64 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvw_se_f16m4(vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  __riscv_sf_vc_vvw_se_f16m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvw_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.i32.nxv1f64.nxv1f32.nxv1f32.i32(i32 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvw_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.i64.nxv1f64.nxv1f32.nxv1f32.i64(i64 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvw_se_f32mf2(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  __riscv_sf_vc_vvw_se_f32mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvw_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.i32.nxv2f64.nxv2f32.nxv2f32.i32(i32 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvw_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.i64.nxv2f64.nxv2f32.nxv2f32.i64(i64 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvw_se_f32m1(vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  __riscv_sf_vc_vvw_se_f32m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvw_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.i32.nxv4f64.nxv4f32.nxv4f32.i32(i32 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvw_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.i64.nxv4f64.nxv4f32.nxv4f32.i64(i64 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvw_se_f32m2(vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  __riscv_sf_vc_vvw_se_f32m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvw_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.i32.nxv8f64.nxv8f32.nxv8f32.i32(i32 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvw_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.i64.nxv8f64.nxv8f32.nxv8f32.i64(i64 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvw_se_f32m4(vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  __riscv_sf_vc_vvw_se_f32m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv1f32.i32.nxv1f16.nxv1f16.i32(i32 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv1f32.i64.nxv1f16.nxv1f16.i64(i64 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_vvw_se_f16mf4(vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvw_se_f16mf4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv2f32.i32.nxv2f16.nxv2f16.i32(i32 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv2f32.i64.nxv2f16.nxv2f16.i64(i64 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_vvw_se_f16mf2(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvw_se_f16mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv4f32.i32.nxv4f16.nxv4f16.i32(i32 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv4f32.i64.nxv4f16.nxv4f16.i64(i64 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_vvw_se_f16m1(vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvw_se_f16m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv8f32.i32.nxv8f16.nxv8f16.i32(i32 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv8f32.i64.nxv8f16.nxv8f16.i64(i64 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_vvw_se_f16m2(vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvw_se_f16m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv16f32.i32.nxv16f16.nxv16f16.i32(i32 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv16f32.i64.nxv16f16.nxv16f16.i64(i64 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_vvw_se_f16m4(vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvw_se_f16m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv1f64.i32.nxv1f32.nxv1f32.i32(i32 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv1f64.i64.nxv1f32.nxv1f32.i64(i64 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_vvw_se_f32mf2(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvw_se_f32mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv2f64.i32.nxv2f32.nxv2f32.i32(i32 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv2f64.i64.nxv2f32.nxv2f32.i64(i64 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_vvw_se_f32m1(vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvw_se_f32m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv4f64.i32.nxv4f32.nxv4f32.i32(i32 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv4f64.i64.nxv4f32.nxv4f32.i64(i64 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_vvw_se_f32m2(vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvw_se_f32m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv8f64.i32.nxv8f32.nxv8f32.i32(i32 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv8f64.i64.nxv8f32.nxv8f32.i64(i64 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_vvw_se_f32m4(vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvw_se_f32m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvw.nxv1f32.i32.nxv1f16.nxv1f16.i32(i32 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvw.nxv1f32.i64.nxv1f16.nxv1f16.i64(i64 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_vvw_f16mf4(vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvw_f16mf4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvw.nxv2f32.i32.nxv2f16.nxv2f16.i32(i32 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvw.nxv2f32.i64.nxv2f16.nxv2f16.i64(i64 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_vvw_f16mf2(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvw_f16mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvw.nxv4f32.i32.nxv4f16.nxv4f16.i32(i32 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvw.nxv4f32.i64.nxv4f16.nxv4f16.i64(i64 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_vvw_f16m1(vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvw_f16m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvw.nxv8f32.i32.nxv8f16.nxv8f16.i32(i32 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvw.nxv8f32.i64.nxv8f16.nxv8f16.i64(i64 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_vvw_f16m2(vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvw_f16m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvw.nxv16f32.i32.nxv16f16.nxv16f16.i32(i32 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvw.nxv16f32.i64.nxv16f16.nxv16f16.i64(i64 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_vvw_f16m4(vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvw_f16m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvw.nxv1f64.i32.nxv1f32.nxv1f32.i32(i32 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvw.nxv1f64.i64.nxv1f32.nxv1f32.i64(i64 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_vvw_f32mf2(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvw_f32mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvw.nxv2f64.i32.nxv2f32.nxv2f32.i32(i32 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvw.nxv2f64.i64.nxv2f32.nxv2f32.i64(i64 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_vvw_f32m1(vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvw_f32m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvw.nxv4f64.i32.nxv4f32.nxv4f32.i32(i32 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvw.nxv4f64.i64.nxv4f32.nxv4f32.i64(i64 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_vvw_f32m2(vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvw_f32m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvw.nxv8f64.i32.nxv8f32.nxv8f32.i32(i32 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvw.nxv8f64.i64.nxv8f32.nxv8f32.i64(i64 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_vvw_f32m4(vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_sf_vc_v_vvw_f32m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvw_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.i32.nxv1f32.nxv1f16.i16.i32(i32 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvw_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.i64.nxv1f32.nxv1f16.i16.i64(i64 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvw_se_f16mf4(vfloat32mf2_t vd, vfloat16mf4_t vs2, uint16_t rs1, size_t vl) {
+  __riscv_sf_vc_xvw_se_f16mf4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvw_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.i32.nxv2f32.nxv2f16.i16.i32(i32 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvw_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.i64.nxv2f32.nxv2f16.i16.i64(i64 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvw_se_f16mf2(vfloat32m1_t vd, vfloat16mf2_t vs2, uint16_t rs1, size_t vl) {
+  __riscv_sf_vc_xvw_se_f16mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvw_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.i32.nxv4f32.nxv4f16.i16.i32(i32 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvw_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.i64.nxv4f32.nxv4f16.i16.i64(i64 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvw_se_f16m1(vfloat32m2_t vd, vfloat16m1_t vs2, uint16_t rs1, size_t vl) {
+  __riscv_sf_vc_xvw_se_f16m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvw_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.i32.nxv8f32.nxv8f16.i16.i32(i32 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvw_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.i64.nxv8f32.nxv8f16.i16.i64(i64 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvw_se_f16m2(vfloat32m4_t vd, vfloat16m2_t vs2, uint16_t rs1, size_t vl) {
+  __riscv_sf_vc_xvw_se_f16m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvw_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.i32.nxv16f32.nxv16f16.i16.i32(i32 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvw_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.i64.nxv16f32.nxv16f16.i16.i64(i64 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvw_se_f16m4(vfloat32m8_t vd, vfloat16m4_t vs2, uint16_t rs1, size_t vl) {
+  __riscv_sf_vc_xvw_se_f16m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvw_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.i32.nxv1f64.nxv1f32.i32.i32(i32 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvw_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.i64.nxv1f64.nxv1f32.i32.i64(i64 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvw_se_f32mf2(vfloat64m1_t vd, vfloat32mf2_t vs2, uint32_t rs1, size_t vl) {
+  __riscv_sf_vc_xvw_se_f32mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvw_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.i32.nxv2f64.nxv2f32.i32.i32(i32 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvw_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.i64.nxv2f64.nxv2f32.i32.i64(i64 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvw_se_f32m1(vfloat64m2_t vd, vfloat32m1_t vs2, uint32_t rs1, size_t vl) {
+  __riscv_sf_vc_xvw_se_f32m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvw_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.i32.nxv4f64.nxv4f32.i32.i32(i32 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvw_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.i64.nxv4f64.nxv4f32.i32.i64(i64 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvw_se_f32m2(vfloat64m4_t vd, vfloat32m2_t vs2, uint32_t rs1, size_t vl) {
+  __riscv_sf_vc_xvw_se_f32m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvw_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.i32.nxv8f64.nxv8f32.i32.i32(i32 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvw_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.i64.nxv8f64.nxv8f32.i32.i64(i64 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvw_se_f32m4(vfloat64m8_t vd, vfloat32m4_t vs2, uint32_t rs1, size_t vl) {
+  __riscv_sf_vc_xvw_se_f32m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv1f32.i32.nxv1f16.i16.i32(i32 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv1f32.i64.nxv1f16.i16.i64(i64 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_xvw_se_f16mf4(vfloat32mf2_t vd, vfloat16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvw_se_f16mf4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv2f32.i32.nxv2f16.i16.i32(i32 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv2f32.i64.nxv2f16.i16.i64(i64 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_xvw_se_f16mf2(vfloat32m1_t vd, vfloat16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvw_se_f16mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv4f32.i32.nxv4f16.i16.i32(i32 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv4f32.i64.nxv4f16.i16.i64(i64 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_xvw_se_f16m1(vfloat32m2_t vd, vfloat16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvw_se_f16m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv8f32.i32.nxv8f16.i16.i32(i32 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv8f32.i64.nxv8f16.i16.i64(i64 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_xvw_se_f16m2(vfloat32m4_t vd, vfloat16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvw_se_f16m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv16f32.i32.nxv16f16.i16.i32(i32 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv16f32.i64.nxv16f16.i16.i64(i64 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_xvw_se_f16m4(vfloat32m8_t vd, vfloat16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvw_se_f16m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv1f64.i32.nxv1f32.i32.i32(i32 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv1f64.i64.nxv1f32.i32.i64(i64 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_xvw_se_f32mf2(vfloat64m1_t vd, vfloat32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvw_se_f32mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv2f64.i32.nxv2f32.i32.i32(i32 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv2f64.i64.nxv2f32.i32.i64(i64 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_xvw_se_f32m1(vfloat64m2_t vd, vfloat32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvw_se_f32m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.i32.nxv4f32.i32.i32(i32 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.i64.nxv4f32.i32.i64(i64 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_xvw_se_f32m2(vfloat64m4_t vd, vfloat32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvw_se_f32m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv8f64.i32.nxv8f32.i32.i32(i32 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv8f64.i64.nxv8f32.i32.i64(i64 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_xvw_se_f32m4(vfloat64m8_t vd, vfloat32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvw_se_f32m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvw.nxv1f32.i32.nxv1f16.i16.i32(i32 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvw.nxv1f32.i64.nxv1f16.i16.i64(i64 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_xvw_f16mf4(vfloat32mf2_t vd, vfloat16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvw_f16mf4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvw.nxv2f32.i32.nxv2f16.i16.i32(i32 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvw.nxv2f32.i64.nxv2f16.i16.i64(i64 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_xvw_f16mf2(vfloat32m1_t vd, vfloat16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvw_f16mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvw.nxv4f32.i32.nxv4f16.i16.i32(i32 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvw.nxv4f32.i64.nxv4f16.i16.i64(i64 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_xvw_f16m1(vfloat32m2_t vd, vfloat16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvw_f16m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvw.nxv8f32.i32.nxv8f16.i16.i32(i32 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvw.nxv8f32.i64.nxv8f16.i16.i64(i64 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_xvw_f16m2(vfloat32m4_t vd, vfloat16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvw_f16m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvw.nxv16f32.i32.nxv16f16.i16.i32(i32 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvw.nxv16f32.i64.nxv16f16.i16.i64(i64 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_xvw_f16m4(vfloat32m8_t vd, vfloat16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvw_f16m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.xvw.nxv1f64.i32.nxv1f32.i32.i32(i32 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.xvw.nxv1f64.i64.nxv1f32.i32.i64(i64 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_xvw_f32mf2(vfloat64m1_t vd, vfloat32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvw_f32mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.xvw.nxv2f64.i32.nxv2f32.i32.i32(i32 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.xvw.nxv2f64.i64.nxv2f32.i32.i64(i64 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_xvw_f32m1(vfloat64m2_t vd, vfloat32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvw_f32m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.xvw.nxv4f64.i32.nxv4f32.i32.i32(i32 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.xvw.nxv4f64.i64.nxv4f32.i32.i64(i64 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_xvw_f32m2(vfloat64m4_t vd, vfloat32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvw_f32m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.xvw.nxv8f64.i32.nxv8f32.i32.i32(i32 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.xvw.nxv8f64.i64.nxv8f32.i32.i64(i64 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_xvw_f32m4(vfloat64m8_t vd, vfloat32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_sf_vc_v_xvw_f32m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivw_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.i32.nxv1f32.nxv1f16.i32.i32(i32 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivw_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.i64.nxv1f32.nxv1f16.i64.i64(i64 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivw_se_f16mf4(vfloat32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  __riscv_sf_vc_ivw_se_f16mf4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivw_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.i32.nxv2f32.nxv2f16.i32.i32(i32 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivw_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.i64.nxv2f32.nxv2f16.i64.i64(i64 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivw_se_f16mf2(vfloat32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  __riscv_sf_vc_ivw_se_f16mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivw_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.i32.nxv4f32.nxv4f16.i32.i32(i32 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivw_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.i64.nxv4f32.nxv4f16.i64.i64(i64 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivw_se_f16m1(vfloat32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  __riscv_sf_vc_ivw_se_f16m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivw_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.i32.nxv8f32.nxv8f16.i32.i32(i32 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivw_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.i64.nxv8f32.nxv8f16.i64.i64(i64 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivw_se_f16m2(vfloat32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  __riscv_sf_vc_ivw_se_f16m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivw_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.i32.nxv16f32.nxv16f16.i32.i32(i32 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivw_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.i64.nxv16f32.nxv16f16.i64.i64(i64 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivw_se_f16m4(vfloat32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  __riscv_sf_vc_ivw_se_f16m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivw_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.i32.nxv1f64.nxv1f32.i32.i32(i32 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivw_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.i64.nxv1f64.nxv1f32.i64.i64(i64 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivw_se_f32mf2(vfloat64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  __riscv_sf_vc_ivw_se_f32mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivw_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.i32.nxv2f64.nxv2f32.i32.i32(i32 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivw_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.i64.nxv2f64.nxv2f32.i64.i64(i64 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivw_se_f32m1(vfloat64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  __riscv_sf_vc_ivw_se_f32m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivw_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.i32.nxv4f64.nxv4f32.i32.i32(i32 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivw_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.i64.nxv4f64.nxv4f32.i64.i64(i64 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivw_se_f32m2(vfloat64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  __riscv_sf_vc_ivw_se_f32m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivw_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.i32.nxv8f64.nxv8f32.i32.i32(i32 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivw_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.i64.nxv8f64.nxv8f32.i64.i64(i64 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivw_se_f32m4(vfloat64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  __riscv_sf_vc_ivw_se_f32m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv1f32.i32.nxv1f16.i32.i32(i32 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv1f32.i64.nxv1f16.i64.i64(i64 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_ivw_se_f16mf4(vfloat32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivw_se_f16mf4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv2f32.i32.nxv2f16.i32.i32(i32 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv2f32.i64.nxv2f16.i64.i64(i64 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_ivw_se_f16mf2(vfloat32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivw_se_f16mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv4f32.i32.nxv4f16.i32.i32(i32 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv4f32.i64.nxv4f16.i64.i64(i64 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_ivw_se_f16m1(vfloat32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivw_se_f16m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv8f32.i32.nxv8f16.i32.i32(i32 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv8f32.i64.nxv8f16.i64.i64(i64 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_ivw_se_f16m2(vfloat32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivw_se_f16m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv16f32.i32.nxv16f16.i32.i32(i32 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv16f32.i64.nxv16f16.i64.i64(i64 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_ivw_se_f16m4(vfloat32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivw_se_f16m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv1f64.i32.nxv1f32.i32.i32(i32 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv1f64.i64.nxv1f32.i64.i64(i64 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_ivw_se_f32mf2(vfloat64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivw_se_f32mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv2f64.i32.nxv2f32.i32.i32(i32 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv2f64.i64.nxv2f32.i64.i64(i64 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_ivw_se_f32m1(vfloat64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivw_se_f32m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv4f64.i32.nxv4f32.i32.i32(i32 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv4f64.i64.nxv4f32.i64.i64(i64 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_ivw_se_f32m2(vfloat64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivw_se_f32m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv8f64.i32.nxv8f32.i32.i32(i32 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv8f64.i64.nxv8f32.i64.i64(i64 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_ivw_se_f32m4(vfloat64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivw_se_f32m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivw.nxv1f32.i32.nxv1f16.i32.i32(i32 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivw.nxv1f32.i64.nxv1f16.i64.i64(i64 3, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_ivw_f16mf4(vfloat32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivw_f16mf4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivw.nxv2f32.i32.nxv2f16.i32.i32(i32 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivw.nxv2f32.i64.nxv2f16.i64.i64(i64 3, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_ivw_f16mf2(vfloat32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivw_f16mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivw.nxv4f32.i32.nxv4f16.i32.i32(i32 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivw.nxv4f32.i64.nxv4f16.i64.i64(i64 3, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_ivw_f16m1(vfloat32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivw_f16m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivw.nxv8f32.i32.nxv8f16.i32.i32(i32 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivw.nxv8f32.i64.nxv8f16.i64.i64(i64 3, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_ivw_f16m2(vfloat32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivw_f16m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivw.nxv16f32.i32.nxv16f16.i32.i32(i32 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivw.nxv16f32.i64.nxv16f16.i64.i64(i64 3, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_ivw_f16m4(vfloat32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivw_f16m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivw.nxv1f64.i32.nxv1f32.i32.i32(i32 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivw.nxv1f64.i64.nxv1f32.i64.i64(i64 3, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_ivw_f32mf2(vfloat64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivw_f32mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivw.nxv2f64.i32.nxv2f32.i32.i32(i32 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivw.nxv2f64.i64.nxv2f32.i64.i64(i64 3, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_ivw_f32m1(vfloat64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivw_f32m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivw.nxv4f64.i32.nxv4f32.i32.i32(i32 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivw.nxv4f64.i64.nxv4f32.i64.i64(i64 3, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_ivw_f32m2(vfloat64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivw_f32m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivw.nxv8f64.i32.nxv8f32.i32.i32(i32 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivw.nxv8f64.i64.nxv8f32.i64.i64(i64 3, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_ivw_f32m4(vfloat64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_sf_vc_v_ivw_f32m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvw_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.i32.nxv1f32.nxv1f16.f16.i32(i32 1, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvw_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.i64.nxv1f32.nxv1f16.f16.i64(i64 1, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvw_se_f16mf4(vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 fs1, size_t vl) {
+  __riscv_sf_vc_fvw_se_f16mf4(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvw_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.i32.nxv2f32.nxv2f16.f16.i32(i32 1, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvw_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.i64.nxv2f32.nxv2f16.f16.i64(i64 1, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvw_se_f16mf2(vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 fs1, size_t vl) {
+  __riscv_sf_vc_fvw_se_f16mf2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvw_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.i32.nxv4f32.nxv4f16.f16.i32(i32 1, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvw_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.i64.nxv4f32.nxv4f16.f16.i64(i64 1, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvw_se_f16m1(vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 fs1, size_t vl) {
+  __riscv_sf_vc_fvw_se_f16m1(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvw_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.i32.nxv8f32.nxv8f16.f16.i32(i32 1, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvw_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.i64.nxv8f32.nxv8f16.f16.i64(i64 1, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvw_se_f16m2(vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 fs1, size_t vl) {
+  __riscv_sf_vc_fvw_se_f16m2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvw_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.i32.nxv16f32.nxv16f16.f16.i32(i32 1, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvw_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.i64.nxv16f32.nxv16f16.f16.i64(i64 1, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvw_se_f16m4(vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 fs1, size_t vl) {
+  __riscv_sf_vc_fvw_se_f16m4(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvw_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.i32.nxv1f64.nxv1f32.f32.i32(i32 1, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvw_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.i64.nxv1f64.nxv1f32.f32.i64(i64 1, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvw_se_f32mf2(vfloat64m1_t vd, vfloat32mf2_t vs2, float fs1, size_t vl) {
+  __riscv_sf_vc_fvw_se_f32mf2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvw_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.i32.nxv2f64.nxv2f32.f32.i32(i32 1, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvw_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.i64.nxv2f64.nxv2f32.f32.i64(i64 1, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvw_se_f32m1(vfloat64m2_t vd, vfloat32m1_t vs2, float fs1, size_t vl) {
+  __riscv_sf_vc_fvw_se_f32m1(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvw_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.i32.nxv4f64.nxv4f32.f32.i32(i32 1, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvw_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.i64.nxv4f64.nxv4f32.f32.i64(i64 1, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvw_se_f32m2(vfloat64m4_t vd, vfloat32m2_t vs2, float fs1, size_t vl) {
+  __riscv_sf_vc_fvw_se_f32m2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvw_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.i32.nxv8f64.nxv8f32.f32.i32(i32 1, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvw_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.i64.nxv8f64.nxv8f32.f32.i64(i64 1, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvw_se_f32m4(vfloat64m8_t vd, vfloat32m4_t vs2, float fs1, size_t vl) {
+  __riscv_sf_vc_fvw_se_f32m4(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv1f32.i32.nxv1f16.f16.i32(i32 1, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv1f32.i64.nxv1f16.f16.i64(i64 1, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_fvw_se_f16mf4(vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvw_se_f16mf4(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv2f32.i32.nxv2f16.f16.i32(i32 1, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv2f32.i64.nxv2f16.f16.i64(i64 1, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_fvw_se_f16mf2(vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvw_se_f16mf2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv4f32.i32.nxv4f16.f16.i32(i32 1, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv4f32.i64.nxv4f16.f16.i64(i64 1, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_fvw_se_f16m1(vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvw_se_f16m1(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv8f32.i32.nxv8f16.f16.i32(i32 1, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv8f32.i64.nxv8f16.f16.i64(i64 1, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_fvw_se_f16m2(vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvw_se_f16m2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv16f32.i32.nxv16f16.f16.i32(i32 1, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv16f32.i64.nxv16f16.f16.i64(i64 1, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_fvw_se_f16m4(vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvw_se_f16m4(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv1f64.i32.nxv1f32.f32.i32(i32 1, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv1f64.i64.nxv1f32.f32.i64(i64 1, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_fvw_se_f32mf2(vfloat64m1_t vd, vfloat32mf2_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvw_se_f32mf2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv2f64.i32.nxv2f32.f32.i32(i32 1, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv2f64.i64.nxv2f32.f32.i64(i64 1, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_fvw_se_f32m1(vfloat64m2_t vd, vfloat32m1_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvw_se_f32m1(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv4f64.i32.nxv4f32.f32.i32(i32 1, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv4f64.i64.nxv4f32.f32.i64(i64 1, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_fvw_se_f32m2(vfloat64m4_t vd, vfloat32m2_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvw_se_f32m2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv8f64.i32.nxv8f32.f32.i32(i32 1, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv8f64.i64.nxv8f32.f32.i64(i64 1, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_fvw_se_f32m4(vfloat64m8_t vd, vfloat32m4_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvw_se_f32m4(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_f16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvw.nxv1f32.i32.nxv1f16.f16.i32(i32 1, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvw.nxv1f32.i64.nxv1f16.f16.i64(i64 1, <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_sf_vc_v_fvw_f16mf4(vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvw_f16mf4(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_f16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvw.nxv2f32.i32.nxv2f16.f16.i32(i32 1, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvw.nxv2f32.i64.nxv2f16.f16.i64(i64 1, <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_sf_vc_v_fvw_f16mf2(vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvw_f16mf2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_f16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvw.nxv4f32.i32.nxv4f16.f16.i32(i32 1, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvw.nxv4f32.i64.nxv4f16.f16.i64(i64 1, <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_sf_vc_v_fvw_f16m1(vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvw_f16m1(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_f16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvw.nxv8f32.i32.nxv8f16.f16.i32(i32 1, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvw.nxv8f32.i64.nxv8f16.f16.i64(i64 1, <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_sf_vc_v_fvw_f16m2(vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvw_f16m2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_f16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvw.nxv16f32.i32.nxv16f16.f16.i32(i32 1, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvw.nxv16f32.i64.nxv16f16.f16.i64(i64 1, <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_sf_vc_v_fvw_f16m4(vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvw_f16m4(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvw.nxv1f64.i32.nxv1f32.f32.i32(i32 1, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvw.nxv1f64.i64.nxv1f32.f32.i64(i64 1, <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_sf_vc_v_fvw_f32mf2(vfloat64m1_t vd, vfloat32mf2_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvw_f32mf2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvw.nxv2f64.i32.nxv2f32.f32.i32(i32 1, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvw.nxv2f64.i64.nxv2f32.f32.i64(i64 1, <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_sf_vc_v_fvw_f32m1(vfloat64m2_t vd, vfloat32m1_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvw_f32m1(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvw.nxv4f64.i32.nxv4f32.f32.i32(i32 1, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvw.nxv4f64.i64.nxv4f32.f32.i64(i64 1, <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_sf_vc_v_fvw_f32m2(vfloat64m4_t vd, vfloat32m2_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvw_f32m2(p26, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvw.nxv8f64.i32.nxv8f32.f32.i32(i32 1, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvw.nxv8f64.i64.nxv8f32.f32.i64(i64 1, <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_sf_vc_v_fvw_f32m4(vfloat64m8_t vd, vfloat32m4_t vs2, float fs1, size_t vl) {
+  return __riscv_sf_vc_v_fvw_f32m4(p26, vd, vs2, fs1, vl);
+}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
index a64fc3403335d23..3975b8426256ac7 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
@@ -463,37 +463,51 @@ multiclass VPatVC_XVV<string intrinsic_suffix, string instruction_suffix,
                      wti.RegClass, vti.RegClass, kind, op1_kind>;
 }
 
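+// Maps an element width (SEW) to the matching scalar FP ValueType, FP register
+// class, and register-class suffix used to build the floating-point VCIX patterns.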
+class GetFTypeInfo<int Sew> {
+  ValueType Scalar = !cond(!eq(Sew, 16) : f16,
+                           !eq(Sew, 32) : f32,
+                           !eq(Sew, 64) : f64);
+  RegisterClass ScalarRegClass = !cond(!eq(Sew, 16) : FPR16,
+                                       !eq(Sew, 32) : FPR32,
+                                       !eq(Sew, 64) : FPR64);
+
+  string ScalarSuffix = !cond(!eq(Scalar, f16) : "FPR16",
+                              !eq(Scalar, f32) : "FPR32",
+                              !eq(Scalar, f64) : "FPR64");
+}
+
 let Predicates = [HasVendorXSfvcp] in {
-  foreach vti = AllIntegerVectors in {
-    defm : VPatVC_X<"x", "X", vti, vti.Scalar, vti.ScalarRegClass>;
+  foreach vti = AllVectors in {
+    defm : VPatVC_X<"x", "X", vti, XLenVT, GPR>;
     defm : VPatVC_X<"i", "I", vti, XLenVT, tsimm5>;
-    defm : VPatVC_XV<"xv", "XV", vti, vti.Scalar, vti.ScalarRegClass>;
+    defm : VPatVC_XV<"xv", "XV", vti, XLenVT, GPR>;
     defm : VPatVC_XV<"iv", "IV", vti, XLenVT, tsimm5>;
     defm : VPatVC_XV<"vv", "VV", vti, vti.Vector, vti.RegClass>;
-    defm : VPatVC_XVV<"xvv", "XVV", vti, vti, vti.Scalar, vti.ScalarRegClass>;
+    defm : VPatVC_XVV<"xvv", "XVV", vti, vti, XLenVT, GPR>;
     defm : VPatVC_XVV<"ivv", "IVV", vti, vti, XLenVT, tsimm5>;
     defm : VPatVC_XVV<"vvv", "VVV", vti, vti, vti.Vector, vti.RegClass>;
+
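+    // Floating-point scalar (fv/fvv) forms exist only for SEW of 16, 32, and 64.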
+    if !ne(vti.SEW, 8) then {
+      defvar finfo = GetFTypeInfo<vti.SEW>;
+      defm : VPatVC_XV<"fv", finfo.ScalarSuffix # "V", vti, finfo.Scalar,
+                       finfo.ScalarRegClass, payload1>;
+      defm : VPatVC_XVV<"fvv", finfo.ScalarSuffix # "VV", vti, vti, finfo.Scalar,
+                        finfo.ScalarRegClass, payload1>;
+    }
   }
-  foreach fvti = AllFloatVectors in {
-    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
-    defm : VPatVC_XV<"fv", fvti.ScalarSuffix # "V", ivti, fvti.Scalar,
-                     fvti.ScalarRegClass, payload1>;
-    defm : VPatVC_XVV<"fvv", fvti.ScalarSuffix # "VV", ivti, ivti, fvti.Scalar,
-                      fvti.ScalarRegClass, payload1>;
-  }
-  foreach VtiToWti = AllWidenableIntVectors in {
+  foreach VtiToWti = !listconcat(AllWidenableIntVectors, AllWidenableFloatVectors) in {
     defvar vti = VtiToWti.Vti;
     defvar wti = VtiToWti.Wti;
-    defm : VPatVC_XVV<"xvw", "XVW", wti, vti, vti.Scalar, vti.ScalarRegClass>;
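+    // The xvw scalar operand is an element-width integer in a GPR, so use the
+    // equivalent integer vector type info even for floating-point element types.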
+    defvar iinfo = GetIntVTypeInfo<vti>.Vti;
+    defm : VPatVC_XVV<"xvw", "XVW", wti, vti, iinfo.Scalar, iinfo.ScalarRegClass>;
     defm : VPatVC_XVV<"ivw", "IVW", wti, vti, XLenVT, tsimm5>;
     defm : VPatVC_XVV<"vvw", "VVW", wti, vti, vti.Vector, vti.RegClass>;
-  }
-  foreach VtiToWti = AllWidenableFloatVectors in {
-    defvar fvti = VtiToWti.Vti;
-    defvar iwti = GetIntVTypeInfo<VtiToWti.Wti>.Vti;
-    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
-    defm : VPatVC_XVV<"fvw", fvti.ScalarSuffix # "VW", iwti, ivti, fvti.Scalar,
-                      fvti.ScalarRegClass, payload1>;
+
+    if !ne(vti.SEW, 8) then {
+      defvar finfo = GetFTypeInfo<vti.SEW>;
+      defm : VPatVC_XVV<"fvw", finfo.ScalarSuffix # "VW", wti, vti, finfo.Scalar,
+                        finfo.ScalarRegClass, payload1>;
+    }
   }
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll
index 250a8bd1df962f2..59124ed8179414c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN:  sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+xsfvcp \
+; RUN:  sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+xsfvcp,+zvfh \
 ; RUN:    -verify-machineinstrs | FileCheck %s
-; RUN:  sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvcp \
+; RUN:  sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvcp,+zvfh \
 ; RUN:    -verify-machineinstrs | FileCheck %s
 
 define void @test_sf_vc_x_se_e8mf8(i8 zeroext %rs1, iXLen %vl) {
@@ -1563,3 +1563,393 @@ entry:
 }
 
 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.i.nxv8i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_i_se_e16mf4(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.i.se.nxv1f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.i.se.nxv1f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_i_se_e16mf2(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.i.se.nxv2f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.i.se.nxv2f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_i_se_e16m1(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.i.se.nxv4f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.i.se.nxv4f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_i_se_e16m2(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.i.se.nxv8f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.i.se.nxv8f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_i_se_e16m4(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.i.se.nxv16f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.i.se.nxv16f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_i_se_e16m8(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.i.se.nxv32f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.i.se.nxv32f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_i_se_e32mf2(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.i.se.nxv1f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.i.se.nxv1f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_i_se_e32m1(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.i.se.nxv2f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.i.se.nxv2f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_i_se_e32m2(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.i.se.nxv4f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.i.se.nxv4f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_i_se_e32m4(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.i.se.nxv8f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.i.se.nxv8f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_i_se_e32m8(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.i.se.nxv16f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.i.se.nxv16f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_i_se_e64m1(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.i.se.nxv1f64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.i.se.nxv1f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_i_se_e64m2(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.i.se.nxv2f64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.i.se.nxv2f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_i_se_e64m4(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.i.se.nxv4f64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.i.se.nxv4f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_i_se_e64m8(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.i.se.nxv8f64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.i.se.nxv8f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_i_e16mf4(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.i.nxv1f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.i.nxv1f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_i_e16mf2(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.i.nxv2f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.i.nxv2f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_i_e16m1(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.i.nxv4f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.i.nxv4f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_i_e16m2(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.i.nxv8f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.i.nxv8f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_i_e16m4(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.i.nxv16f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.i.nxv16f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_i_e16m8(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.i.nxv32f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.i.nxv32f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_i_e32mf2(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.i.nxv1f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.i.nxv1f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_i_e32m1(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.i.nxv2f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.i.nxv2f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_i_e32m2(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.i.nxv4f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.i.nxv4f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_i_e32m4(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.i.nxv8f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.i.nxv8f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_i_e32m8(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.i.nxv16f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.i.nxv16f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_i_e64m1(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.i.nxv1f64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.i.nxv1f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_i_e64m2(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.i.nxv2f64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.i.nxv2f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_i_e64m4(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.i.nxv4f64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.i.nxv4f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_i_e64m8(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.i.nxv8f64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.i.nxv8f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll
index d4063180c63e27c..9e0576fe59cd375 100644
--- a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN:  sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+xsfvcp \
+; RUN:  sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+xsfvcp \
 ; RUN:    -verify-machineinstrs | FileCheck %s
-; RUN:  sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+xsfvcp \
+; RUN:  sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+xsfvcp \
 ; RUN:    -verify-machineinstrs | FileCheck %s
 
 define void @test_sf_vc_vv_se_e8mf8(<vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
@@ -3006,3 +3006,2187 @@ entry:
 }
 
 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.nxv8i64.iXLen.f64.iXLen(iXLen, <vscale x 8 x i64>, double, iXLen)
+
+define void @test_f_sf_vc_vv_se_e16mf4(<vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f16.nxv1f16.iXLen(iXLen 3, iXLen 31, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f16.nxv1f16.iXLen(iXLen, iXLen, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e16mf2(<vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f16.nxv2f16.iXLen(iXLen 3, iXLen 31, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f16.nxv2f16.iXLen(iXLen, iXLen, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e16m1(<vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f16.nxv4f16.iXLen(iXLen 3, iXLen 31, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f16.nxv4f16.iXLen(iXLen, iXLen, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e16m2(<vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f16.nxv8f16.iXLen(iXLen 3, iXLen 31, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f16.nxv8f16.iXLen(iXLen, iXLen, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e16m4(<vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f16.nxv16f16.iXLen(iXLen 3, iXLen 31, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f16.nxv16f16.iXLen(iXLen, iXLen, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e16m8(<vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32f16.nxv32f16.iXLen(iXLen 3, iXLen 31, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32f16.nxv32f16.iXLen(iXLen, iXLen, <vscale x 32 x half>, <vscale x 32 x half>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e32mf2(<vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f32.nxv1f32.iXLen(iXLen 3, iXLen 31, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f32.nxv1f32.iXLen(iXLen, iXLen, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e32m1(<vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f32.nxv2f32.iXLen(iXLen 3, iXLen 31, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f32.nxv2f32.iXLen(iXLen, iXLen, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e32m2(<vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f32.nxv4f32.iXLen(iXLen 3, iXLen 31, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f32.nxv4f32.iXLen(iXLen, iXLen, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e32m4(<vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f32.nxv8f32.iXLen(iXLen 3, iXLen 31, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f32.nxv8f32.iXLen(iXLen, iXLen, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e32m8(<vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f32.nxv16f32.iXLen(iXLen 3, iXLen 31, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f32.nxv16f32.iXLen(iXLen, iXLen, <vscale x 16 x float>, <vscale x 16 x float>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e64m1(<vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f64.nxv1f64.iXLen(iXLen 3, iXLen 31, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f64.nxv1f64.iXLen(iXLen, iXLen, <vscale x 1 x double>, <vscale x 1 x double>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e64m2(<vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f64.nxv2f64.iXLen(iXLen 3, iXLen 31, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f64.nxv2f64.iXLen(iXLen, iXLen, <vscale x 2 x double>, <vscale x 2 x double>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e64m4(<vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f64.nxv4f64.iXLen(iXLen 3, iXLen 31, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f64.nxv4f64.iXLen(iXLen, iXLen, <vscale x 4 x double>, <vscale x 4 x double>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e64m8(<vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f64.nxv8f64.iXLen(iXLen 3, iXLen 31, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f64.nxv8f64.iXLen(iXLen, iXLen, <vscale x 8 x double>, <vscale x 8 x double>, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_vv_se_e16mf4(<vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vv.se.nxv1f16.iXLen.nxv1f16.iXLen(iXLen 3, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.vv.se.nxv1f16.iXLen.nxv1f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_vv_se_e16mf2(<vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vv.se.nxv2f16.iXLen.nxv2f16.iXLen(iXLen 3, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.vv.se.nxv2f16.iXLen.nxv2f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_vv_se_e16m1(<vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vv.se.nxv4f16.iXLen.nxv4f16.iXLen(iXLen 3, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.vv.se.nxv4f16.iXLen.nxv4f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_vv_se_e16m2(<vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vv.se.nxv8f16.iXLen.nxv8f16.iXLen(iXLen 3, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.vv.se.nxv8f16.iXLen.nxv8f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_vv_se_e16m4(<vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vv.se.nxv16f16.iXLen.nxv16f16.iXLen(iXLen 3, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.vv.se.nxv16f16.iXLen.nxv16f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_vv_se_e16m8(<vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vv.se.nxv32f16.iXLen.nxv32f16.iXLen(iXLen 3, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.vv.se.nxv32f16.iXLen.nxv32f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_vv_se_e32mf2(<vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vv.se.nxv1f32.iXLen.nxv1f32.iXLen(iXLen 3, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.vv.se.nxv1f32.iXLen.nxv1f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_vv_se_e32m1(<vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vv.se.nxv2f32.iXLen.nxv2f32.iXLen(iXLen 3, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.vv.se.nxv2f32.iXLen.nxv2f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_vv_se_e32m2(<vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vv.se.nxv4f32.iXLen.nxv4f32.iXLen(iXLen 3, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.vv.se.nxv4f32.iXLen.nxv4f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_vv_se_e32m4(<vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vv.se.nxv8f32.iXLen.nxv8f32.iXLen(iXLen 3, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.vv.se.nxv8f32.iXLen.nxv8f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_vv_se_e32m8(<vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vv.se.nxv16f32.iXLen.nxv16f32.iXLen(iXLen 3, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.vv.se.nxv16f32.iXLen.nxv16f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_vv_se_e64m1(<vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vv.se.nxv1f64.iXLen.nxv1f64.iXLen(iXLen 3, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.vv.se.nxv1f64.iXLen.nxv1f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_vv_se_e64m2(<vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vv.se.nxv2f64.iXLen.nxv2f64.iXLen(iXLen 3, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.vv.se.nxv2f64.iXLen.nxv2f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_vv_se_e64m4(<vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vv.se.nxv4f64.iXLen.nxv4f64.iXLen(iXLen 3, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.vv.se.nxv4f64.iXLen.nxv4f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_vv_se_e64m8(<vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vv.se.nxv8f64.iXLen.nxv8f64.iXLen(iXLen 3, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.vv.se.nxv8f64.iXLen.nxv8f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_vv_e16mf4(<vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vv.nxv1f16.iXLen.nxv1f16.iXLen(iXLen 3, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.vv.nxv1f16.iXLen.nxv1f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_vv_e16mf2(<vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vv.nxv2f16.iXLen.nxv2f16.iXLen(iXLen 3, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.vv.nxv2f16.iXLen.nxv2f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_vv_e16m1(<vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vv.nxv4f16.iXLen.nxv4f16.iXLen(iXLen 3, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.vv.nxv4f16.iXLen.nxv4f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_vv_e16m2(<vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vv.nxv8f16.iXLen.nxv8f16.iXLen(iXLen 3, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.vv.nxv8f16.iXLen.nxv8f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_vv_e16m4(<vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vv.nxv16f16.iXLen.nxv16f16.iXLen(iXLen 3, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.vv.nxv16f16.iXLen.nxv16f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_vv_e16m8(<vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vv.nxv32f16.iXLen.nxv32f16.iXLen(iXLen 3, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.vv.nxv32f16.iXLen.nxv32f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_vv_e32mf2(<vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vv.nxv1f32.iXLen.nxv1f32.iXLen(iXLen 3, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.vv.nxv1f32.iXLen.nxv1f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_vv_e32m1(<vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vv.nxv2f32.iXLen.nxv2f32.iXLen(iXLen 3, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.vv.nxv2f32.iXLen.nxv2f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_vv_e32m2(<vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vv.nxv4f32.iXLen.nxv4f32.iXLen(iXLen 3, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.vv.nxv4f32.iXLen.nxv4f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_vv_e32m4(<vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vv.nxv8f32.iXLen.nxv8f32.iXLen(iXLen 3, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.vv.nxv8f32.iXLen.nxv8f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_vv_e32m8(<vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vv.nxv16f32.iXLen.nxv16f32.iXLen(iXLen 3, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.vv.nxv16f32.iXLen.nxv16f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_vv_e64m1(<vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vv.nxv1f64.iXLen.nxv1f64.iXLen(iXLen 3, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.vv.nxv1f64.iXLen.nxv1f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_vv_e64m2(<vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vv.nxv2f64.iXLen.nxv2f64.iXLen(iXLen 3, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.vv.nxv2f64.iXLen.nxv2f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_vv_e64m4(<vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vv.nxv4f64.iXLen.nxv4f64.iXLen(iXLen 3, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.vv.nxv4f64.iXLen.nxv4f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_vv_e64m8(<vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vv.nxv8f64.iXLen.nxv8f64.iXLen(iXLen 3, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.vv.nxv8f64.iXLen.nxv8f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, iXLen)
+
+define void @test_f_sf_vc_xv_se_e16mf4(<vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f16.i16.iXLen(iXLen, iXLen, <vscale x 1 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xv_se_e16mf2(<vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f16.i16.iXLen(iXLen, iXLen, <vscale x 2 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xv_se_e16m1(<vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f16.i16.iXLen(iXLen, iXLen, <vscale x 4 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xv_se_e16m2(<vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f16.i16.iXLen(iXLen, iXLen, <vscale x 8 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xv_se_e16m4(<vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f16.i16.iXLen(iXLen, iXLen, <vscale x 16 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xv_se_e16m8(<vscale x 32 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32f16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 32 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32f16.i16.iXLen(iXLen, iXLen, <vscale x 32 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xv_se_e32mf2(<vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f32.i32.iXLen(iXLen, iXLen, <vscale x 1 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_xv_se_e32m1(<vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f32.i32.iXLen(iXLen, iXLen, <vscale x 2 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_xv_se_e32m2(<vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f32.i32.iXLen(iXLen, iXLen, <vscale x 4 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_xv_se_e32m4(<vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f32.i32.iXLen(iXLen, iXLen, <vscale x 8 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_xv_se_e32m8(<vscale x 16 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 16 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f32.i32.iXLen(iXLen, iXLen, <vscale x 16 x float>, i32, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_xv_se_e16mf4(<vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.xv.se.nxv1f16.iXLen.i16.iXLen(iXLen 3, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.xv.se.nxv1f16.iXLen.i16.iXLen(iXLen, <vscale x 1 x half>, i16, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_xv_se_e16mf2(<vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.xv.se.nxv2f16.iXLen.i16.iXLen(iXLen 3, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.xv.se.nxv2f16.iXLen.i16.iXLen(iXLen, <vscale x 2 x half>, i16, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_xv_se_e16m1(<vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.xv.se.nxv4f16.iXLen.i16.iXLen(iXLen 3, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.xv.se.nxv4f16.iXLen.i16.iXLen(iXLen, <vscale x 4 x half>, i16, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_xv_se_e16m2(<vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.xv.se.nxv8f16.iXLen.i16.iXLen(iXLen 3, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.xv.se.nxv8f16.iXLen.i16.iXLen(iXLen, <vscale x 8 x half>, i16, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_xv_se_e16m4(<vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.xv.se.nxv16f16.iXLen.i16.iXLen(iXLen 3, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.xv.se.nxv16f16.iXLen.i16.iXLen(iXLen, <vscale x 16 x half>, i16, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_xv_se_e16m8(<vscale x 32 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.xv.se.nxv32f16.iXLen.i16.iXLen(iXLen 3, <vscale x 32 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.xv.se.nxv32f16.iXLen.i16.iXLen(iXLen, <vscale x 32 x half>, i16, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_xv_se_e32mf2(<vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xv.se.nxv1f32.i32.f32.iXLen(iXLen 3, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.xv.se.nxv1f32.i32.f32.iXLen(iXLen, <vscale x 1 x float>, i32, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_xv_se_e32m1(<vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xv.se.nxv2f32.i32.f32.iXLen(iXLen 3, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.xv.se.nxv2f32.i32.f32.iXLen(iXLen, <vscale x 2 x float>, i32, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_xv_se_e32m2(<vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xv.se.nxv4f32.i32.f32.iXLen(iXLen 3, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.xv.se.nxv4f32.i32.f32.iXLen(iXLen, <vscale x 4 x float>, i32, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_xv_se_e32m4(<vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xv.se.nxv8f32.iXLen.i32.iXLen(iXLen 3, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.xv.se.nxv8f32.iXLen.i32.iXLen(iXLen, <vscale x 8 x float>, i32, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_xv_se_e32m8(<vscale x 16 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xv.se.nxv16f32.iXLen.i32.iXLen(iXLen 3, <vscale x 16 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.xv.se.nxv16f32.iXLen.i32.iXLen(iXLen, <vscale x 16 x float>, i32, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_xv_e16mf4(<vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.xv.nxv1f16.iXLen.i16.iXLen(iXLen 3, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.xv.nxv1f16.iXLen.i16.iXLen(iXLen, <vscale x 1 x half>, i16, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_xv_e16mf2(<vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.xv.nxv2f16.iXLen.i16.iXLen(iXLen 3, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.xv.nxv2f16.iXLen.i16.iXLen(iXLen, <vscale x 2 x half>, i16, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_xv_e16m1(<vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.xv.nxv4f16.iXLen.i16.iXLen(iXLen 3, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.xv.nxv4f16.iXLen.i16.iXLen(iXLen, <vscale x 4 x half>, i16, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_xv_e16m2(<vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.xv.nxv8f16.iXLen.i16.iXLen(iXLen 3, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.xv.nxv8f16.iXLen.i16.iXLen(iXLen, <vscale x 8 x half>, i16, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_xv_e16m4(<vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.xv.nxv16f16.iXLen.i16.iXLen(iXLen 3, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.xv.nxv16f16.iXLen.i16.iXLen(iXLen, <vscale x 16 x half>, i16, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_xv_e16m8(<vscale x 32 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.xv.nxv32f16.iXLen.i16.iXLen(iXLen 3, <vscale x 32 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.xv.nxv32f16.iXLen.i16.iXLen(iXLen, <vscale x 32 x half>, i16, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_xv_e32mf2(<vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xv.nxv1f32.iXLen.i32.iXLen(iXLen 3, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.xv.nxv1f32.iXLen.i32.iXLen(iXLen, <vscale x 1 x float>, i32, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_xv_e32m1(<vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xv.nxv2f32.iXLen.i32.iXLen(iXLen 3, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.xv.nxv2f32.iXLen.i32.iXLen(iXLen, <vscale x 2 x float>, i32, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_xv_e32m2(<vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xv.nxv4f32.iXLen.i32.iXLen(iXLen 3, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.xv.nxv4f32.iXLen.i32.iXLen(iXLen, <vscale x 4 x float>, i32, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_xv_e32m4(<vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xv.nxv8f32.iXLen.i32.iXLen(iXLen 3, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.xv.nxv8f32.iXLen.i32.iXLen(iXLen, <vscale x 8 x float>, i32, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_xv_e32m8(<vscale x 16 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xv.nxv16f32.iXLen.i32.iXLen(iXLen 3, <vscale x 16 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.xv.nxv16f32.iXLen.i32.iXLen(iXLen, <vscale x 16 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_iv_se_e16mf4(<vscale x 1 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f16.iXLen.iXLen(iXLen, iXLen, <vscale x 1 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e16mf2(<vscale x 2 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f16.iXLen.iXLen(iXLen, iXLen, <vscale x 2 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e16m1(<vscale x 4 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f16.iXLen.iXLen(iXLen, iXLen, <vscale x 4 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e16m2(<vscale x 8 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f16.iXLen.iXLen(iXLen, iXLen, <vscale x 8 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e16m4(<vscale x 16 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f16.iXLen.iXLen(iXLen, iXLen, <vscale x 16 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e16m8(<vscale x 32 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32f16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 32 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32f16.iXLen.iXLen(iXLen, iXLen, <vscale x 32 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e32mf2(<vscale x 1 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f32.iXLen.iXLen(iXLen, iXLen, <vscale x 1 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e32m1(<vscale x 2 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f32.iXLen.iXLen(iXLen, iXLen, <vscale x 2 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e32m2(<vscale x 4 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f32.iXLen.iXLen(iXLen, iXLen, <vscale x 4 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e32m4(<vscale x 8 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f32.iXLen.iXLen(iXLen, iXLen, <vscale x 8 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e32m8(<vscale x 16 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 16 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f32.iXLen.iXLen(iXLen, iXLen, <vscale x 16 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e64m1(<vscale x 1 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f64.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 1 x double> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f64.iXLen.iXLen(iXLen, iXLen, <vscale x 1 x double>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e64m2(<vscale x 2 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f64.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 2 x double> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f64.iXLen.iXLen(iXLen, iXLen, <vscale x 2 x double>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e64m4(<vscale x 4 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f64.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 4 x double> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f64.iXLen.iXLen(iXLen, iXLen, <vscale x 4 x double>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e64m8(<vscale x 8 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f64.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 8 x double> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f64.iXLen.iXLen(iXLen, iXLen, <vscale x 8 x double>, iXLen, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_iv_se_e16mf4(<vscale x 1 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.iv.se.nxv1f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.iv.se.nxv1f16.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x half>, iXLen, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_iv_se_e16mf2(<vscale x 2 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.iv.se.nxv2f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.iv.se.nxv2f16.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x half>, iXLen, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_iv_se_e16m1(<vscale x 4 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.iv.se.nxv4f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.iv.se.nxv4f16.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x half>, iXLen, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_iv_se_e16m2(<vscale x 8 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.iv.se.nxv8f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.iv.se.nxv8f16.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x half>, iXLen, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_iv_se_e16m4(<vscale x 16 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.iv.se.nxv16f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.iv.se.nxv16f16.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x half>, iXLen, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_iv_se_e16m8(<vscale x 32 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.iv.se.nxv32f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.iv.se.nxv32f16.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x half>, iXLen, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_iv_se_e32mf2(<vscale x 1 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.iv.se.nxv1f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.iv.se.nxv1f32.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x float>, iXLen, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_iv_se_e32m1(<vscale x 2 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.iv.se.nxv2f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.iv.se.nxv2f32.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x float>, iXLen, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_iv_se_e32m2(<vscale x 4 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.iv.se.nxv4f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.iv.se.nxv4f32.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x float>, iXLen, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_iv_se_e32m4(<vscale x 8 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.iv.se.nxv8f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.iv.se.nxv8f32.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x float>, iXLen, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_iv_se_e32m8(<vscale x 16 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.iv.se.nxv16f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.iv.se.nxv16f32.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x float>, iXLen, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_iv_se_e64m1(<vscale x 1 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.iv.se.nxv1f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.iv.se.nxv1f64.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x double>, iXLen, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_iv_se_e64m2(<vscale x 2 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.iv.se.nxv2f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.iv.se.nxv2f64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x double>, iXLen, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_iv_se_e64m4(<vscale x 4 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.iv.se.nxv4f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.iv.se.nxv4f64.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x double>, iXLen, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_iv_se_e64m8(<vscale x 8 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.iv.se.nxv8f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.iv.se.nxv8f64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x double>, iXLen, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_iv_e16mf4(<vscale x 1 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.iv.nxv1f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.iv.nxv1f16.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x half>, iXLen, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_iv_e16mf2(<vscale x 2 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.iv.nxv2f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.iv.nxv2f16.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x half>, iXLen, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_iv_e16m1(<vscale x 4 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.iv.nxv4f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.iv.nxv4f16.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x half>, iXLen, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_iv_e16m2(<vscale x 8 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.iv.nxv8f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.iv.nxv8f16.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x half>, iXLen, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_iv_e16m4(<vscale x 16 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.iv.nxv16f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.iv.nxv16f16.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x half>, iXLen, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_iv_e16m8(<vscale x 32 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.iv.nxv32f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.iv.nxv32f16.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x half>, iXLen, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_iv_e32mf2(<vscale x 1 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.iv.nxv1f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.iv.nxv1f32.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x float>, iXLen, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_iv_e32m1(<vscale x 2 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.iv.nxv2f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.iv.nxv2f32.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x float>, iXLen, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_iv_e32m2(<vscale x 4 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.iv.nxv4f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.iv.nxv4f32.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x float>, iXLen, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_iv_e32m4(<vscale x 8 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.iv.nxv8f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.iv.nxv8f32.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x float>, iXLen, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_iv_e32m8(<vscale x 16 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.iv.nxv16f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.iv.nxv16f32.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x float>, iXLen, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_iv_e64m1(<vscale x 1 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.iv.nxv1f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.iv.nxv1f64.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x double>, iXLen, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_iv_e64m2(<vscale x 2 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.iv.nxv2f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.iv.nxv2f64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x double>, iXLen, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_iv_e64m4(<vscale x 4 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.iv.nxv4f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.iv.nxv4f64.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x double>, iXLen, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_iv_e64m8(<vscale x 8 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.iv.nxv8f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.iv.nxv8f64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x double>, iXLen, iXLen)
+
+define void @test_f_sf_vc_fv_se_e16mf4(<vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f16.i16.iXLen(iXLen 1, iXLen 31, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f16.i16.iXLen(iXLen, iXLen, <vscale x 1 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fv_se_e16mf2(<vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f16.i16.iXLen(iXLen 1, iXLen 31, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f16.i16.iXLen(iXLen, iXLen, <vscale x 2 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fv_se_e16m1(<vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f16.i16.iXLen(iXLen 1, iXLen 31, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f16.i16.iXLen(iXLen, iXLen, <vscale x 4 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fv_se_e16m2(<vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f16.i16.iXLen(iXLen 1, iXLen 31, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f16.i16.iXLen(iXLen, iXLen, <vscale x 8 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fv_se_e16m4(<vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f16.i16.iXLen(iXLen 1, iXLen 31, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f16.i16.iXLen(iXLen, iXLen, <vscale x 16 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fv_se_e16m8(<vscale x 32 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32f16.i16.iXLen(iXLen 1, iXLen 31, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32f16.i16.iXLen(iXLen, iXLen, <vscale x 32 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fv_se_e32mf2(<vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f32.i32.iXLen(iXLen 1, iXLen 31, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f32.i32.iXLen(iXLen, iXLen, <vscale x 1 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fv_se_e32m1(<vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f32.i32.iXLen(iXLen 1, iXLen 31, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f32.i32.iXLen(iXLen, iXLen, <vscale x 2 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fv_se_e32m2(<vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f32.i32.iXLen(iXLen 1, iXLen 31, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f32.i32.iXLen(iXLen, iXLen, <vscale x 4 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fv_se_e32m4(<vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f32.i32.iXLen(iXLen 1, iXLen 31, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f32.i32.iXLen(iXLen, iXLen, <vscale x 8 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fv_se_e32m8(<vscale x 16 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f32.i32.iXLen(iXLen 1, iXLen 31, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f32.i32.iXLen(iXLen, iXLen, <vscale x 16 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fv_se_e64m1(<vscale x 1 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f64.i64.iXLen(iXLen 1, iXLen 31, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f64.i64.iXLen(iXLen, iXLen, <vscale x 1 x double>, double, iXLen)
+
+define void @test_f_sf_vc_fv_se_e64m2(<vscale x 2 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f64.i64.iXLen(iXLen 1, iXLen 31, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f64.i64.iXLen(iXLen, iXLen, <vscale x 2 x double>, double, iXLen)
+
+define void @test_f_sf_vc_fv_se_e64m4(<vscale x 4 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f64.i64.iXLen(iXLen 1, iXLen 31, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f64.i64.iXLen(iXLen, iXLen, <vscale x 4 x double>, double, iXLen)
+
+define void @test_f_sf_vc_fv_se_e64m8(<vscale x 8 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f64.i64.iXLen(iXLen 1, iXLen 31, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f64.i64.iXLen(iXLen, iXLen, <vscale x 8 x double>, double, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_fv_se_e16mf4(<vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.fv.se.nxv1f16.iXLen.i16.iXLen(iXLen 1, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.fv.se.nxv1f16.iXLen.i16.iXLen(iXLen, <vscale x 1 x half>, half, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_fv_se_e16mf2(<vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.fv.se.nxv2f16.iXLen.i16.iXLen(iXLen 1, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.fv.se.nxv2f16.iXLen.i16.iXLen(iXLen, <vscale x 2 x half>, half, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_fv_se_e16m1(<vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fv.se.nxv4f16.iXLen.i16.iXLen(iXLen 1, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.fv.se.nxv4f16.iXLen.i16.iXLen(iXLen, <vscale x 4 x half>, half, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_fv_se_e16m2(<vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.fv.se.nxv8f16.iXLen.i16.iXLen(iXLen 1, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.fv.se.nxv8f16.iXLen.i16.iXLen(iXLen, <vscale x 8 x half>, half, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_fv_se_e16m4(<vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.fv.se.nxv16f16.iXLen.i16.iXLen(iXLen 1, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.fv.se.nxv16f16.iXLen.i16.iXLen(iXLen, <vscale x 16 x half>, half, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_fv_se_e16m8(<vscale x 32 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.fv.se.nxv32f16.iXLen.i16.iXLen(iXLen 1, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.fv.se.nxv32f16.iXLen.i16.iXLen(iXLen, <vscale x 32 x half>, half, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_fv_se_e32mf2(<vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fv.se.nxv1f32.iXLen.i32.iXLen(iXLen 1, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.fv.se.nxv1f32.iXLen.i32.iXLen(iXLen, <vscale x 1 x float>, float, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_fv_se_e32m1(<vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.se.nxv2f32.iXLen.i32.iXLen(iXLen 1, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.se.nxv2f32.iXLen.i32.iXLen(iXLen, <vscale x 2 x float>, float, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_fv_se_e32m2(<vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.se.nxv4f32.iXLen.i32.iXLen(iXLen 1, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.se.nxv4f32.iXLen.i32.iXLen(iXLen, <vscale x 4 x float>, float, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_fv_se_e32m4(<vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fv.se.nxv8f32.iXLen.i32.iXLen(iXLen 1, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.fv.se.nxv8f32.iXLen.i32.iXLen(iXLen, <vscale x 8 x float>, float, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_fv_se_e32m8(<vscale x 16 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fv.se.nxv16f32.iXLen.i32.iXLen(iXLen 1, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.fv.se.nxv16f32.iXLen.i32.iXLen(iXLen, <vscale x 16 x float>, float, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_fv_se_e64m1(<vscale x 1 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fv.se.nxv1f64.iXLen.i64.iXLen(iXLen 1, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.fv.se.nxv1f64.iXLen.i64.iXLen(iXLen, <vscale x 1 x double>, double, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_fv_se_e64m2(<vscale x 2 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fv.se.nxv2f64.iXLen.i64.iXLen(iXLen 1, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.fv.se.nxv2f64.iXLen.i64.iXLen(iXLen, <vscale x 2 x double>, double, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_fv_se_e64m4(<vscale x 4 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fv.se.nxv4f64.iXLen.i64.iXLen(iXLen 1, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.fv.se.nxv4f64.iXLen.i64.iXLen(iXLen, <vscale x 4 x double>, double, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_fv_se_e64m8(<vscale x 8 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fv.se.nxv8f64.iXLen.i64.iXLen(iXLen 1, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.fv.se.nxv8f64.iXLen.i64.iXLen(iXLen, <vscale x 8 x double>, double, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_fv_e16mf4(<vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.fv.nxv1f16.iXLen.i16.iXLen(iXLen 1, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.fv.nxv1f16.iXLen.i16.iXLen(iXLen, <vscale x 1 x half>, half, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_fv_e16mf2(<vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.fv.nxv2f16.iXLen.i16.iXLen(iXLen 1, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.fv.nxv2f16.iXLen.i16.iXLen(iXLen, <vscale x 2 x half>, half, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_fv_e16m1(<vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fv.nxv4f16.iXLen.i16.iXLen(iXLen 1, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.fv.nxv4f16.iXLen.i16.iXLen(iXLen, <vscale x 4 x half>, half, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_fv_e16m2(<vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.fv.nxv8f16.iXLen.i16.iXLen(iXLen 1, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.fv.nxv8f16.iXLen.i16.iXLen(iXLen, <vscale x 8 x half>, half, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_fv_e16m4(<vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.fv.nxv16f16.iXLen.i16.iXLen(iXLen 1, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.fv.nxv16f16.iXLen.i16.iXLen(iXLen, <vscale x 16 x half>, half, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_fv_e16m8(<vscale x 32 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.fv.nxv32f16.iXLen.i16.iXLen(iXLen 1, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.fv.nxv32f16.iXLen.i16.iXLen(iXLen, <vscale x 32 x half>, half, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_fv_e32mf2(<vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fv.nxv1f32.iXLen.i32.iXLen(iXLen 1, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.fv.nxv1f32.iXLen.i32.iXLen(iXLen, <vscale x 1 x float>, float, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_fv_e32m1(<vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.nxv2f32.iXLen.i32.iXLen(iXLen 1, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.nxv2f32.iXLen.i32.iXLen(iXLen, <vscale x 2 x float>, float, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_fv_e32m2(<vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.nxv4f32.iXLen.i32.iXLen(iXLen 1, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.nxv4f32.iXLen.i32.iXLen(iXLen, <vscale x 4 x float>, float, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_fv_e32m4(<vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fv.nxv8f32.iXLen.i32.iXLen(iXLen 1, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.fv.nxv8f32.iXLen.i32.iXLen(iXLen, <vscale x 8 x float>, float, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_fv_e32m8(<vscale x 16 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fv.nxv16f32.iXLen.i32.iXLen(iXLen 1, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.fv.nxv16f32.iXLen.i32.iXLen(iXLen, <vscale x 16 x float>, float, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_fv_e64m1(<vscale x 1 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fv.nxv1f64.iXLen.i64.iXLen(iXLen 1, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.fv.nxv1f64.iXLen.i64.iXLen(iXLen, <vscale x 1 x double>, double, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_fv_e64m2(<vscale x 2 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fv.nxv2f64.iXLen.i64.iXLen(iXLen 1, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.fv.nxv2f64.iXLen.i64.iXLen(iXLen, <vscale x 2 x double>, double, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_fv_e64m4(<vscale x 4 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fv.nxv4f64.iXLen.i64.iXLen(iXLen 1, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.fv.nxv4f64.iXLen.i64.iXLen(iXLen, <vscale x 4 x double>, double, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_fv_e64m8(<vscale x 8 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fv.nxv8f64.iXLen.i64.iXLen(iXLen 1, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.fv.nxv8f64.iXLen.i64.iXLen(iXLen, <vscale x 8 x double>, double, iXLen)
diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll
index d37d121cdd19737..7f78306fe3380a5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN:  sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+xsfvcp \
+; RUN:  sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+xsfvcp \
 ; RUN:    -verify-machineinstrs | FileCheck %s
-; RUN:  sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+xsfvcp \
+; RUN:  sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+xsfvcp \
 ; RUN:    -verify-machineinstrs | FileCheck %s
 
 define void @test_sf_vc_vvv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
@@ -2441,11 +2441,11 @@ define void @test_sf_vc_fvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16>
 ; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i16.f16.iXLen(iXLen 1, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i16.i16.iXLen(iXLen 1, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i16.f16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i16.i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, half, iXLen)
 
 define void @test_sf_vc_fvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_fvv_se_e16mf2:
@@ -2454,11 +2454,11 @@ define void @test_sf_vc_fvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16>
 ; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i16.f16.iXLen(iXLen 1, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i16.i16.iXLen(iXLen 1, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i16.f16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i16.i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, half, iXLen)
 
 define void @test_sf_vc_fvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_fvv_se_e16m1:
@@ -2467,11 +2467,11 @@ define void @test_sf_vc_fvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16>
 ; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i16.f16.iXLen(iXLen 1, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i16.i16.iXLen(iXLen 1, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i16.f16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i16.i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, half, iXLen)
 
 define void @test_sf_vc_fvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_fvv_se_e16m2:
@@ -2480,11 +2480,11 @@ define void @test_sf_vc_fvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16>
 ; CHECK-NEXT:    sf.vc.fvv 1, v8, v10, fa0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i16.f16.iXLen(iXLen 1, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i16.i16.iXLen(iXLen 1, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i16.f16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i16.i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, half, iXLen)
 
 define void @test_sf_vc_fvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_fvv_se_e16m4:
@@ -2493,11 +2493,11 @@ define void @test_sf_vc_fvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16
 ; CHECK-NEXT:    sf.vc.fvv 1, v8, v12, fa0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i16.f16.iXLen(iXLen 1, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i16.i16.iXLen(iXLen 1, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i16.f16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i16.i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, half, iXLen)
 
 define void @test_sf_vc_fvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_fvv_se_e16m8:
@@ -2506,11 +2506,11 @@ define void @test_sf_vc_fvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16
 ; CHECK-NEXT:    sf.vc.fvv 1, v8, v16, fa0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32i16.f16.iXLen(iXLen 1, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32i16.i16.iXLen(iXLen 1, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32i16.f16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32i16.i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, half, iXLen)
 
 define void @test_sf_vc_fvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_fvv_se_e32mf2:
@@ -2636,11 +2636,11 @@ define <vscale x 1 x i16> @test_sf_vc_v_fvv_se_e16mf4(<vscale x 1 x i16> %vd, <v
 ; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.iXLen.f16.iXLen(iXLen 1, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.iXLen.i16.iXLen(iXLen 1, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
   ret <vscale x 1 x i16> %0
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.iXLen.f16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, half, iXLen)
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.iXLen.i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, half, iXLen)
 
 define <vscale x 2 x i16> @test_sf_vc_v_fvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf2:
@@ -2649,11 +2649,11 @@ define <vscale x 2 x i16> @test_sf_vc_v_fvv_se_e16mf2(<vscale x 2 x i16> %vd, <v
 ; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.iXLen.f16.iXLen(iXLen 1, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.iXLen.i16.iXLen(iXLen 1, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
   ret <vscale x 2 x i16> %0
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.iXLen.f16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, half, iXLen)
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.iXLen.i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, half, iXLen)
 
 define <vscale x 4 x i16> @test_sf_vc_v_fvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m1:
@@ -2662,11 +2662,11 @@ define <vscale x 4 x i16> @test_sf_vc_v_fvv_se_e16m1(<vscale x 4 x i16> %vd, <vs
 ; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.iXLen.f16.iXLen(iXLen 1, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.iXLen.i16.iXLen(iXLen 1, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
   ret <vscale x 4 x i16> %0
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.iXLen.f16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, half, iXLen)
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.iXLen.i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, half, iXLen)
 
 define <vscale x 8 x i16> @test_sf_vc_v_fvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m2:
@@ -2675,11 +2675,11 @@ define <vscale x 8 x i16> @test_sf_vc_v_fvv_se_e16m2(<vscale x 8 x i16> %vd, <vs
 ; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.iXLen.f16.iXLen(iXLen 1, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.iXLen.i16.iXLen(iXLen 1, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
   ret <vscale x 8 x i16> %0
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.iXLen.f16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, half, iXLen)
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.iXLen.i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, half, iXLen)
 
 define <vscale x 16 x i16> @test_sf_vc_v_fvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m4:
@@ -2688,11 +2688,11 @@ define <vscale x 16 x i16> @test_sf_vc_v_fvv_se_e16m4(<vscale x 16 x i16> %vd, <
 ; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.iXLen.f16.iXLen(iXLen 1, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.iXLen.i16.iXLen(iXLen 1, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
   ret <vscale x 16 x i16> %0
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.iXLen.f16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, half, iXLen)
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.iXLen.i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, half, iXLen)
 
 define <vscale x 32 x i16> @test_sf_vc_v_fvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m8:
@@ -2701,11 +2701,11 @@ define <vscale x 32 x i16> @test_sf_vc_v_fvv_se_e16m8(<vscale x 32 x i16> %vd, <
 ; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.iXLen.f16.iXLen(iXLen 1, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.iXLen.i16.iXLen(iXLen 1, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
   ret <vscale x 32 x i16> %0
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.iXLen.f16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, half, iXLen)
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.iXLen.i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, half, iXLen)
 
 define <vscale x 1 x i32> @test_sf_vc_v_fvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32mf2:
@@ -2831,11 +2831,11 @@ define <vscale x 1 x i16> @test_sf_vc_v_fvv_e16mf4(<vscale x 1 x i16> %vd, <vsca
 ; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.nxv1i16.iXLen.f16.iXLen(iXLen 1, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.nxv1i16.iXLen.i16.iXLen(iXLen 1, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
   ret <vscale x 1 x i16> %0
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.nxv1i16.iXLen.f16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, half, iXLen)
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.nxv1i16.iXLen.i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, half, iXLen)
 
 define <vscale x 2 x i16> @test_sf_vc_v_fvv_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_v_fvv_e16mf2:
@@ -2844,11 +2844,11 @@ define <vscale x 2 x i16> @test_sf_vc_v_fvv_e16mf2(<vscale x 2 x i16> %vd, <vsca
 ; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.nxv2i16.iXLen.f16.iXLen(iXLen 1, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.nxv2i16.iXLen.i16.iXLen(iXLen 1, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
   ret <vscale x 2 x i16> %0
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.nxv2i16.iXLen.f16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, half, iXLen)
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.nxv2i16.iXLen.i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, half, iXLen)
 
 define <vscale x 4 x i16> @test_sf_vc_v_fvv_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_v_fvv_e16m1:
@@ -2857,11 +2857,11 @@ define <vscale x 4 x i16> @test_sf_vc_v_fvv_e16m1(<vscale x 4 x i16> %vd, <vscal
 ; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.nxv4i16.iXLen.f16.iXLen(iXLen 1, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.nxv4i16.iXLen.i16.iXLen(iXLen 1, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
   ret <vscale x 4 x i16> %0
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.nxv4i16.iXLen.f16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, half, iXLen)
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.nxv4i16.iXLen.i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, half, iXLen)
 
 define <vscale x 8 x i16> @test_sf_vc_v_fvv_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_v_fvv_e16m2:
@@ -2870,11 +2870,11 @@ define <vscale x 8 x i16> @test_sf_vc_v_fvv_e16m2(<vscale x 8 x i16> %vd, <vscal
 ; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.nxv8i16.iXLen.f16.iXLen(iXLen 1, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.nxv8i16.iXLen.i16.iXLen(iXLen 1, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
   ret <vscale x 8 x i16> %0
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.nxv8i16.iXLen.f16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, half, iXLen)
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.nxv8i16.iXLen.i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, half, iXLen)
 
 define <vscale x 16 x i16> @test_sf_vc_v_fvv_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_v_fvv_e16m4:
@@ -2883,11 +2883,11 @@ define <vscale x 16 x i16> @test_sf_vc_v_fvv_e16m4(<vscale x 16 x i16> %vd, <vsc
 ; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.nxv16i16.iXLen.f16.iXLen(iXLen 1, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.nxv16i16.iXLen.i16.iXLen(iXLen 1, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
   ret <vscale x 16 x i16> %0
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.nxv16i16.iXLen.f16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, half, iXLen)
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.nxv16i16.iXLen.i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, half, iXLen)
 
 define <vscale x 32 x i16> @test_sf_vc_v_fvv_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_v_fvv_e16m8:
@@ -2896,11 +2896,11 @@ define <vscale x 32 x i16> @test_sf_vc_v_fvv_e16m8(<vscale x 32 x i16> %vd, <vsc
 ; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.nxv32i16.iXLen.f16.iXLen(iXLen 1, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.nxv32i16.iXLen.i16.iXLen(iXLen 1, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
   ret <vscale x 32 x i16> %0
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.nxv32i16.iXLen.f16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, half, iXLen)
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.nxv32i16.iXLen.i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, half, iXLen)
 
 define <vscale x 1 x i32> @test_sf_vc_v_fvv_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_v_fvv_e32mf2:
@@ -3018,3 +3018,2196 @@ entry:
 }
 
 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.nxv8i64.iXLen.f64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, double, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f16.nxv1f16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f16.nxv1f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f16.nxv2f16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f16.nxv2f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f16.nxv4f16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f16.nxv4f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f16.nxv8f16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f16.nxv8f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f16.nxv16f16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f16.nxv16f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32f16.nxv32f16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32f16.nxv32f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x half>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f32.nxv1f32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f32.nxv1f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f32.nxv2f32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f32.nxv2f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f32.nxv4f32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f32.nxv4f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f32.nxv8f32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f32.nxv8f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f32.nxv16f32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f32.nxv16f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f64.nxv1f64.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f64.nxv1f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f64.nxv2f64.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f64.nxv2f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f64.nxv4f64.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f64.nxv4f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x double>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f64.nxv8f64.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f64.nxv8f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x double>, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_vvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv1f16.iXLen.nxv1f16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv1f16.iXLen.nxv1f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_vvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv2f16.iXLen.nxv2f16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv2f16.iXLen.nxv2f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_vvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv4f16.iXLen.nxv4f16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv4f16.iXLen.nxv4f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_vvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv8f16.iXLen.nxv8f16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv8f16.iXLen.nxv8f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_vvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv16f16.iXLen.nxv16f16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv16f16.iXLen.nxv16f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_vvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv32f16.iXLen.nxv32f16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv32f16.iXLen.nxv32f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x half>, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_vvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv1f32.iXLen.nxv1f32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv1f32.iXLen.nxv1f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_vvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv2f32.iXLen.nxv2f32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv2f32.iXLen.nxv2f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_vvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv4f32.iXLen.nxv4f32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv4f32.iXLen.nxv4f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_vvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv8f32.iXLen.nxv8f32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv8f32.iXLen.nxv8f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_vvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv16f32.iXLen.nxv16f32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv16f32.iXLen.nxv16f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_vvv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv1f64.iXLen.nxv1f64.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv1f64.iXLen.nxv1f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_vvv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv2f64.iXLen.nxv2f64.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv2f64.iXLen.nxv2f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_vvv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv4f64.iXLen.nxv4f64.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv4f64.iXLen.nxv4f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x double>, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_vvv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv8f64.iXLen.nxv8f64.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv8f64.iXLen.nxv8f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x double>, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_vvv_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.nxv1f16.iXLen.nxv1f16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.nxv1f16.iXLen.nxv1f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_vvv_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.nxv2f16.iXLen.nxv2f16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.nxv2f16.iXLen.nxv2f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_vvv_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.nxv4f16.iXLen.nxv4f16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.nxv4f16.iXLen.nxv4f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_vvv_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.nxv8f16.iXLen.nxv8f16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.nxv8f16.iXLen.nxv8f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_vvv_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.nxv16f16.iXLen.nxv16f16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.nxv16f16.iXLen.nxv16f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_vvv_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.nxv32f16.iXLen.nxv32f16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.nxv32f16.iXLen.nxv32f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x half>, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_vvv_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.nxv1f32.iXLen.nxv1f32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.nxv1f32.iXLen.nxv1f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_vvv_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.nxv2f32.iXLen.nxv2f32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.nxv2f32.iXLen.nxv2f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_vvv_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.nxv4f32.iXLen.nxv4f32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.nxv4f32.iXLen.nxv4f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_vvv_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.nxv8f32.iXLen.nxv8f32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.nxv8f32.iXLen.nxv8f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_vvv_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.nxv16f32.iXLen.nxv16f32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.nxv16f32.iXLen.nxv16f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_vvv_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.nxv1f64.iXLen.nxv1f64.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.nxv1f64.iXLen.nxv1f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_vvv_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.nxv2f64.iXLen.nxv2f64.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.nxv2f64.iXLen.nxv2f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_vvv_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.nxv4f64.iXLen.nxv4f64.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.nxv4f64.iXLen.nxv4f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x double>, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_vvv_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvv.nxv8f64.iXLen.nxv8f64.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvv.nxv8f64.iXLen.nxv8f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x double>, iXLen)
+
+define void @test_f_sf_vc_xvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f16.i16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f16.i16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f16.i16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f16.i16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f16.i16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f16.i16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f16.i16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f16.i16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f16.i16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f16.i16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32f16.i16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32f16.i16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.f32.nxv1f32.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.f32.nxv1f32.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_xvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.f32.nxv2f32.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.f32.nxv2f32.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_xvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.f32.nxv4f32.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.f32.nxv4f32.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_xvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.f32.nxv8f32.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.f32.nxv8f32.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_xvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.f32.nxv16f32.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.f32.nxv16f32.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, i32, iXLen)
+
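+; sf.vc.v.xvv .se tests: same scalar GPR operand, but the intrinsic returns a vector result.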
+define <vscale x 1 x half> @test_f_sf_vc_v_xvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv1f16.iXLen.i16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv1f16.iXLen.i16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, i16, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_xvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv2f16.iXLen.i16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv2f16.iXLen.i16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, i16, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_xvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv4f16.iXLen.i16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv4f16.iXLen.i16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, i16, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_xvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv8f16.iXLen.i16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv8f16.iXLen.i16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, i16, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_xvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv16f16.iXLen.i16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv16f16.iXLen.i16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, i16, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_xvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv32f16.iXLen.i16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv32f16.iXLen.i16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, i16, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_xvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv1f32.iXLen.f32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv1f32.iXLen.f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, i32, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_xvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv2f32.iXLen.f32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv2f32.iXLen.f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, i32, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_xvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.iXLen.f32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.iXLen.f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, i32, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_xvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv8f32.iXLen.f32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv8f32.iXLen.f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, i32, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_xvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv16f32.iXLen.f32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv16f32.iXLen.f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, i32, iXLen)
+
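+; Non-.se variant of sf.vc.v.xvv.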
+define <vscale x 1 x half> @test_f_sf_vc_v_xvv_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.xvv.nxv1f16.iXLen.i16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.xvv.nxv1f16.iXLen.i16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, i16, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_xvv_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.xvv.nxv2f16.iXLen.i16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.xvv.nxv2f16.iXLen.i16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, i16, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_xvv_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.xvv.nxv4f16.iXLen.i16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.xvv.nxv4f16.iXLen.i16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, i16, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_xvv_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.xvv.nxv8f16.iXLen.i16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.xvv.nxv8f16.iXLen.i16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, i16, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_xvv_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.xvv.nxv16f16.iXLen.i16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.xvv.nxv16f16.iXLen.i16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, i16, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_xvv_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.xvv.nxv32f16.iXLen.i16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.xvv.nxv32f16.iXLen.i16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, i16, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_xvv_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvv.nxv1f32.iXLen.f32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvv.nxv1f32.iXLen.f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, i32, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_xvv_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvv.nxv2f32.iXLen.f32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvv.nxv2f32.iXLen.f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, i32, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_xvv_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.nxv4f32.iXLen.f32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.nxv4f32.iXLen.f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, i32, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_xvv_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvv.nxv8f32.iXLen.f32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvv.nxv8f32.iXLen.f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, i32, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_xvv_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvv.nxv16f32.iXLen.f32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvv.nxv16f32.iXLen.f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, i32, iXLen)
+
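+; sf.vc.ivv .se tests: immediate operand (constant 10), no result.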
+define void @test_f_sf_vc_ivv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f16.iXLen.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f16.iXLen.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f16.iXLen.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f16.iXLen.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f16.iXLen.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f16.iXLen.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f16.iXLen.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f16.iXLen.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f16.iXLen.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f16.iXLen.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32f16.iXLen.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32f16.iXLen.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f32.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f32.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f32.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f32.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f32.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f32.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f32.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f32.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f32.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f32.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f64.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f64.iXLen.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f64.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f64.iXLen.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f64.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f64.iXLen.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f64.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f64.iXLen.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, iXLen, iXLen)
+
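+; sf.vc.v.ivv .se tests: immediate operand, returns a vector result.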
+define <vscale x 1 x half> @test_f_sf_vc_v_ivv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv1f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv1f16.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, iXLen, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_ivv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv2f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv2f16.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, iXLen, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_ivv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv4f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv4f16.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, iXLen, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_ivv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv8f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv8f16.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, iXLen, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_ivv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv16f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv16f16.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, iXLen, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_ivv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv32f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv32f16.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, iXLen, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_ivv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv1f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv1f32.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, iXLen, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_ivv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv2f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv2f32.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, iXLen, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_ivv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv4f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv4f32.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, iXLen, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_ivv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv8f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv8f32.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, iXLen, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_ivv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv16f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv16f32.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, iXLen, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_ivv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv1f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv1f64.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, iXLen, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_ivv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv2f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv2f64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, iXLen, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_ivv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv4f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv4f64.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, iXLen, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_ivv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv8f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv8f64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, iXLen, iXLen)
+
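+; Non-.se variant of sf.vc.v.ivv.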
+define <vscale x 1 x half> @test_f_sf_vc_v_ivv_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.ivv.nxv1f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.ivv.nxv1f16.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, iXLen, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_ivv_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.ivv.nxv2f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.ivv.nxv2f16.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, iXLen, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_ivv_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.ivv.nxv4f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.ivv.nxv4f16.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, iXLen, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_ivv_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.ivv.nxv8f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.ivv.nxv8f16.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, iXLen, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_ivv_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.ivv.nxv16f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.ivv.nxv16f16.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, iXLen, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_ivv_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.ivv.nxv32f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.ivv.nxv32f16.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, iXLen, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_ivv_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivv.nxv1f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivv.nxv1f32.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, iXLen, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_ivv_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivv.nxv2f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivv.nxv2f32.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, iXLen, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_ivv_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivv.nxv4f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivv.nxv4f32.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, iXLen, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_ivv_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivv.nxv8f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivv.nxv8f32.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, iXLen, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_ivv_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivv.nxv16f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivv.nxv16f32.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, iXLen, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_ivv_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivv.nxv1f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivv.nxv1f64.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, iXLen, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_ivv_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivv.nxv2f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivv.nxv2f64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, iXLen, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_ivv_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivv.nxv4f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivv.nxv4f64.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, iXLen, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_ivv_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivv.nxv8f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivv.nxv8f64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, iXLen, iXLen)
+
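+; sf.vc.fvv .se tests: FP scalar operand (fa0), no result.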
+define void @test_f_sf_vc_fvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f16.i16.iXLen(iXLen 1, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f16.i16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f16.i16.iXLen(iXLen 1, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f16.i16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f16.i16.iXLen(iXLen 1, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f16.i16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f16.i16.iXLen(iXLen 1, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f16.i16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f16.i16.iXLen(iXLen 1, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f16.i16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32f16.i16.iXLen(iXLen 1, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32f16.i16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f32.f32.iXLen(iXLen 1, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f32.f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f32.f32.iXLen(iXLen 1, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f32.f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f32.f32.iXLen(iXLen 1, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f32.f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f32.f32.iXLen(iXLen 1, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f32.f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f32.f32.iXLen(iXLen 1, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f32.f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f64.f64.iXLen(iXLen 1, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f64.f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, double, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f64.f64.iXLen(iXLen 1, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f64.f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, double, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f64.f64.iXLen(iXLen 1, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f64.f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, double, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f64.f64.iXLen(iXLen 1, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f64.f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, double, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_fvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv1f16.iXLen.i16.iXLen(iXLen 1, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv1f16.iXLen.i16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, half, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_fvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv2f16.iXLen.i16.iXLen(iXLen 1, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv2f16.iXLen.i16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, half, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_fvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv4f16.iXLen.i16.iXLen(iXLen 1, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv4f16.iXLen.i16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, half, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_fvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv8f16.iXLen.i16.iXLen(iXLen 1, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv8f16.iXLen.i16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, half, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_fvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv16f16.iXLen.i16.iXLen(iXLen 1, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv16f16.iXLen.i16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, half, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_fvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv32f16.iXLen.i16.iXLen(iXLen 1, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv32f16.iXLen.i16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, half, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_fvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv1f32.iXLen.f32.iXLen(iXLen 1, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv1f32.iXLen.f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, float, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_fvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv2f32.iXLen.f32.iXLen(iXLen 1, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv2f32.iXLen.f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, float, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_fvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv4f32.iXLen.f32.iXLen(iXLen 1, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv4f32.iXLen.f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, float, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_fvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv8f32.iXLen.f32.iXLen(iXLen 1, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv8f32.iXLen.f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, float, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_fvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv16f32.iXLen.f32.iXLen(iXLen 1, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv16f32.iXLen.f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, float, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_fvv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv1f64.iXLen.f64.iXLen(iXLen 1, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv1f64.iXLen.f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, double, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_fvv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv2f64.iXLen.f64.iXLen(iXLen 1, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv2f64.iXLen.f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, double, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_fvv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv4f64.iXLen.f64.iXLen(iXLen 1, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv4f64.iXLen.f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, double, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_fvv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv8f64.iXLen.f64.iXLen(iXLen 1, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv8f64.iXLen.f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, double, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_fvv_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.fvv.nxv1f16.iXLen.i16.iXLen(iXLen 1, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.fvv.nxv1f16.iXLen.i16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, half, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_fvv_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.fvv.nxv2f16.iXLen.i16.iXLen(iXLen 1, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.fvv.nxv2f16.iXLen.i16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, half, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_fvv_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fvv.nxv4f16.iXLen.i16.iXLen(iXLen 1, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.fvv.nxv4f16.iXLen.i16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, half, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_fvv_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.fvv.nxv8f16.iXLen.i16.iXLen(iXLen 1, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.fvv.nxv8f16.iXLen.i16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, half, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_fvv_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.fvv.nxv16f16.iXLen.i16.iXLen(iXLen 1, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.fvv.nxv16f16.iXLen.i16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, half, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_fvv_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.fvv.nxv32f16.iXLen.i16.iXLen(iXLen 1, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.fvv.nxv32f16.iXLen.i16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, half, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_fvv_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvv.nxv1f32.iXLen.f32.iXLen(iXLen 1, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvv.nxv1f32.iXLen.f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, float, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_fvv_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvv.nxv2f32.iXLen.f32.iXLen(iXLen 1, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvv.nxv2f32.iXLen.f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, float, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_fvv_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvv.nxv4f32.iXLen.f32.iXLen(iXLen 1, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvv.nxv4f32.iXLen.f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, float, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_fvv_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvv.nxv8f32.iXLen.f32.iXLen(iXLen 1, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvv.nxv8f32.iXLen.f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, float, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_fvv_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvv.nxv16f32.iXLen.f32.iXLen(iXLen 1, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvv.nxv16f32.iXLen.f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, float, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_fvv_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvv.nxv1f64.iXLen.f64.iXLen(iXLen 1, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvv.nxv1f64.iXLen.f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, double, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_fvv_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvv.nxv2f64.iXLen.f64.iXLen(iXLen 1, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvv.nxv2f64.iXLen.f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, double, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_fvv_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvv.nxv4f64.iXLen.f64.iXLen(iXLen 1, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvv.nxv4f64.iXLen.f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, double, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_fvv_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvv.nxv8f64.iXLen.f64.iXLen(iXLen 1, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvv.nxv8f64.iXLen.f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, double, iXLen)
diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll
index 2d6ac8d55fc152e..56e6b345e2cea29 100644
--- a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN:  sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+xsfvcp \
+; RUN:  sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+xsfvcp \
 ; RUN:    -verify-machineinstrs | FileCheck %s
-; RUN:  sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+xsfvcp \
+; RUN:  sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+xsfvcp \
 ; RUN:    -verify-machineinstrs | FileCheck %s
 
 define void @test_sf_vc_vvw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
@@ -2109,3 +2109,1407 @@ entry:
 }
 
 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, float, iXLen)
+
+define void @test_f_sf_vc_vvw_se_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvw_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f32.nxv1f16.nxv1f16.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f32.nxv1f16.nxv1f16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
+
+define void @test_f_sf_vc_vvw_se_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvw_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f32.nxv2f16.nxv2f16.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f32.nxv2f16.nxv2f16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
+
+define void @test_f_sf_vc_vvw_se_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvw_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v10, v11
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f32.nxv4f16.nxv4f16.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f32.nxv4f16.nxv4f16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
+
+define void @test_f_sf_vc_vvw_se_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvw_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v12, v14
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f32.nxv8f16.nxv8f16.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f32.nxv8f16.nxv8f16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
+
+define void @test_f_sf_vc_vvw_se_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvw_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v16, v20
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16f32.nxv16f16.nxv16f16.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16f32.nxv16f16.nxv16f16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
+
+define void @test_f_sf_vc_vvw_se_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvw_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f64.nxv1f32.nxv1f32.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f64.nxv1f32.nxv1f32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
+
+define void @test_f_sf_vc_vvw_se_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvw_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v10, v11
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f64.nxv2f32.nxv2f32.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f64.nxv2f32.nxv2f32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
+
+define void @test_f_sf_vc_vvw_se_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvw_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v12, v14
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f64.nxv4f32.nxv4f32.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f64.nxv4f32.nxv4f32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
+
+define void @test_f_sf_vc_vvw_se_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvw_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v16, v20
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f64.nxv8f32.nxv8f32.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f64.nxv8f32.nxv8f32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_vvw_se_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv1f32.iXLen.nxv1f16.nxv1f16.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv1f32.iXLen.nxv1f16.nxv1f16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_vvw_se_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv2f32.iXLen.nxv2f16.nxv2f16.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv2f32.iXLen.nxv2f16.nxv2f16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_vvw_se_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv4f32.iXLen.nxv4f16.nxv4f16.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv4f32.iXLen.nxv4f16.nxv4f16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_vvw_se_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv8f32.iXLen.nxv8f16.nxv8f16.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv8f32.iXLen.nxv8f16.nxv8f16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_vvw_se_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv16f32.iXLen.nxv16f16.nxv16f16.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv16f32.iXLen.nxv16f16.nxv16f16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_vvw_se_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv1f64.iXLen.nxv1f32.nxv1f32.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv1f64.iXLen.nxv1f32.nxv1f32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_vvw_se_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv2f64.iXLen.nxv2f32.nxv2f32.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv2f64.iXLen.nxv2f32.nxv2f32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_vvw_se_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv4f64.iXLen.nxv4f32.nxv4f32.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv4f64.iXLen.nxv4f32.nxv4f32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_vvw_se_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv8f64.iXLen.nxv8f32.nxv8f32.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv8f64.iXLen.nxv8f32.nxv8f32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_vvw_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvw.nxv1f32.iXLen.nxv1f16.nxv1f16.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvw.nxv1f32.iXLen.nxv1f16.nxv1f16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_vvw_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvw.nxv2f32.iXLen.nxv2f16.nxv2f16.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvw.nxv2f32.iXLen.nxv2f16.nxv2f16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_vvw_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvw.nxv4f32.iXLen.nxv4f16.nxv4f16.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvw.nxv4f32.iXLen.nxv4f16.nxv4f16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_vvw_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvw.nxv8f32.iXLen.nxv8f16.nxv8f16.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvw.nxv8f32.iXLen.nxv8f16.nxv8f16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_vvw_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvw.nxv16f32.iXLen.nxv16f16.nxv16f16.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvw.nxv16f32.iXLen.nxv16f16.nxv16f16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_vvw_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvw.nxv1f64.iXLen.nxv1f32.nxv1f32.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvw.nxv1f64.iXLen.nxv1f32.nxv1f32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_vvw_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvw.nxv2f64.iXLen.nxv2f32.nxv2f32.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvw.nxv2f64.iXLen.nxv2f32.nxv2f32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_vvw_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvw.nxv4f64.iXLen.nxv4f32.nxv4f32.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvw.nxv4f64.iXLen.nxv4f32.nxv4f32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_vvw_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvw.nxv8f64.iXLen.nxv8f32.nxv8f32.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvw.nxv8f64.iXLen.nxv8f32.nxv8f32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
+
+define void @test_f_sf_vc_xvw_se_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvw_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f32.nxv1f16.i16.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f32.nxv1f16.i16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xvw_se_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvw_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f32.nxv2f16.i16.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f32.nxv2f16.i16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xvw_se_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvw_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f32.nxv4f16.i16.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f32.nxv4f16.i16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xvw_se_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvw_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f32.nxv8f16.i16.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f32.nxv8f16.i16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xvw_se_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvw_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16f32.nxv16f16.i16.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16f32.nxv16f16.i16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xvw_se_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvw_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f64.nxv1f32.i32.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f64.nxv1f32.i32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_xvw_se_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvw_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f64.nxv2f32.i32.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f64.nxv2f32.i32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_xvw_se_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvw_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f64.nxv4f32.i32.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f64.nxv4f32.i32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_xvw_se_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvw_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f64.nxv8f32.i32.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f64.nxv8f32.i32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, i32, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_xvw_se_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv1f32.iXLen.nxv1f16.i16.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv1f32.iXLen.nxv1f16.i16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, i16, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_xvw_se_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv2f32.iXLen.nxv2f16.i16.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv2f32.iXLen.nxv2f16.i16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, i16, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_xvw_se_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv4f32.iXLen.nxv4f16.i16.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv4f32.iXLen.nxv4f16.i16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, i16, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_xvw_se_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv8f32.iXLen.nxv8f16.i16.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv8f32.iXLen.nxv8f16.i16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, i16, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_xvw_se_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv16f32.iXLen.nxv16f16.i16.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv16f32.iXLen.nxv16f16.i16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, i16, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_xvw_se_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv1f64.i32.nxv1f32.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv1f64.i32.nxv1f32.iXLen.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, i32, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_xvw_se_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv2f64.i32.nxv2f32.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv2f64.i32.nxv2f32.iXLen.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, i32, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_xvw_se_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.i32.nxv4f32.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.i32.nxv4f32.iXLen.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, i32, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_xvw_se_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv8f64.i32.nxv8f32.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv8f64.i32.nxv8f32.iXLen.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, i32, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_xvw_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvw.nxv1f32.iXLen.nxv1f16.i16.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvw.nxv1f32.iXLen.nxv1f16.i16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, i16, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_xvw_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvw.nxv2f32.iXLen.nxv2f16.i16.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvw.nxv2f32.iXLen.nxv2f16.i16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, i16, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_xvw_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvw.nxv4f32.iXLen.nxv4f16.i16.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvw.nxv4f32.iXLen.nxv4f16.i16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, i16, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_xvw_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvw.nxv8f32.iXLen.nxv8f16.i16.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvw.nxv8f32.iXLen.nxv8f16.i16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, i16, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_xvw_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvw.nxv16f32.iXLen.nxv16f16.i16.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvw.nxv16f32.iXLen.nxv16f16.i16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, i16, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_xvw_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.xvw.nxv1f64.iXLen.nxv1f32.i32.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.xvw.nxv1f64.iXLen.nxv1f32.i32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, i32, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_xvw_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.xvw.nxv2f64.iXLen.nxv2f32.i32.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.xvw.nxv2f64.iXLen.nxv2f32.i32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, i32, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_xvw_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.xvw.nxv4f64.iXLen.nxv4f32.i32.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.xvw.nxv4f64.iXLen.nxv4f32.i32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, i32, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_xvw_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.xvw.nxv8f64.iXLen.nxv8f32.i32.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.xvw.nxv8f64.iXLen.nxv8f32.i32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_ivw_se_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivw_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f32.nxv1f16.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f32.nxv1f16.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivw_se_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivw_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f32.nxv2f16.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f32.nxv2f16.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivw_se_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivw_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f32.nxv4f16.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f32.nxv4f16.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivw_se_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivw_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f32.nxv8f16.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f32.nxv8f16.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivw_se_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivw_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16f32.nxv16f16.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16f32.nxv16f16.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivw_se_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivw_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f64.nxv1f32.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f64.nxv1f32.iXLen.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivw_se_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivw_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f64.nxv2f32.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f64.nxv2f32.iXLen.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivw_se_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivw_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f64.nxv4f32.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f64.nxv4f32.iXLen.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivw_se_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivw_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f64.nxv8f32.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f64.nxv8f32.iXLen.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, iXLen, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_ivw_se_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv1f32.iXLen.nxv1f16.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv1f32.iXLen.nxv1f16.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, iXLen, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_ivw_se_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv2f32.iXLen.nxv2f16.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv2f32.iXLen.nxv2f16.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, iXLen, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_ivw_se_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv4f32.iXLen.nxv4f16.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv4f32.iXLen.nxv4f16.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, iXLen, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_ivw_se_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv8f32.iXLen.nxv8f16.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv8f32.iXLen.nxv8f16.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, iXLen, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_ivw_se_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv16f32.iXLen.nxv16f16.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv16f32.iXLen.nxv16f16.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, iXLen, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_ivw_se_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv1f64.iXLen.nxv1f32.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv1f64.iXLen.nxv1f32.iXLen.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, iXLen, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_ivw_se_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv2f64.iXLen.nxv2f32.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv2f64.iXLen.nxv2f32.iXLen.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, iXLen, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_ivw_se_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv4f64.iXLen.nxv4f32.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv4f64.iXLen.nxv4f32.iXLen.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, iXLen, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_ivw_se_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv8f64.iXLen.nxv8f32.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv8f64.iXLen.nxv8f32.iXLen.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, iXLen, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_ivw_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivw.nxv1f32.iXLen.nxv1f16.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivw.nxv1f32.iXLen.nxv1f16.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, iXLen, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_ivw_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivw.nxv2f32.iXLen.nxv2f16.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivw.nxv2f32.iXLen.nxv2f16.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, iXLen, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_ivw_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivw.nxv4f32.iXLen.nxv4f16.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivw.nxv4f32.iXLen.nxv4f16.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, iXLen, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_ivw_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivw.nxv8f32.iXLen.nxv8f16.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivw.nxv8f32.iXLen.nxv8f16.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, iXLen, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_ivw_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivw.nxv16f32.iXLen.nxv16f16.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivw.nxv16f32.iXLen.nxv16f16.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, iXLen, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_ivw_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivw.nxv1f64.iXLen.nxv1f32.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivw.nxv1f64.iXLen.nxv1f32.iXLen.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, iXLen, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_ivw_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivw.nxv2f64.iXLen.nxv2f32.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivw.nxv2f64.iXLen.nxv2f32.iXLen.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, iXLen, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_ivw_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivw.nxv4f64.iXLen.nxv4f32.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivw.nxv4f64.iXLen.nxv4f32.iXLen.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, iXLen, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_ivw_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivw.nxv8f64.iXLen.nxv8f32.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivw.nxv8f64.iXLen.nxv8f32.iXLen.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_fvw_se_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvw_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f32.nxv1f16.i16.iXLen(iXLen 1, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f32.nxv1f16.i16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fvw_se_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvw_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f32.nxv2f16.i16.iXLen(iXLen 1, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f32.nxv2f16.i16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fvw_se_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvw_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f32.nxv4f16.i16.iXLen(iXLen 1, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f32.nxv4f16.i16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fvw_se_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvw_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f32.nxv8f16.i16.iXLen(iXLen 1, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f32.nxv8f16.i16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fvw_se_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvw_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16f32.nxv16f16.i16.iXLen(iXLen 1, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16f32.nxv16f16.i16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fvw_se_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvw_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f64.nxv1f32.i32.iXLen(iXLen 1, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f64.nxv1f32.i32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fvw_se_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvw_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f64.nxv2f32.i32.iXLen(iXLen 1, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f64.nxv2f32.i32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fvw_se_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvw_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f64.nxv4f32.i32.iXLen(iXLen 1, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f64.nxv4f32.i32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fvw_se_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvw_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f64.nxv8f32.i32.iXLen(iXLen 1, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f64.nxv8f32.i32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, float, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_fvw_se_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv1f32.iXLen.nxv1f16.i16.iXLen(iXLen 1, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv1f32.iXLen.nxv1f16.i16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, half, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_fvw_se_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv2f32.iXLen.nxv2f16.i16.iXLen(iXLen 1, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv2f32.iXLen.nxv2f16.i16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, half, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_fvw_se_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv4f32.iXLen.nxv4f16.i16.iXLen(iXLen 1, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv4f32.iXLen.nxv4f16.i16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, half, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_fvw_se_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv8f32.iXLen.nxv8f16.i16.iXLen(iXLen 1, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv8f32.iXLen.nxv8f16.i16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, half, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_fvw_se_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv16f32.iXLen.nxv16f16.i16.iXLen(iXLen 1, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv16f32.iXLen.nxv16f16.i16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, half, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_fvw_se_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv1f64.iXLen.nxv1f32.i32.iXLen(iXLen 1, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv1f64.iXLen.nxv1f32.i32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, float, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_fvw_se_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv2f64.iXLen.nxv2f32.i32.iXLen(iXLen 1, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv2f64.iXLen.nxv2f32.i32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, float, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_fvw_se_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv4f64.iXLen.nxv4f32.i32.iXLen(iXLen 1, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv4f64.iXLen.nxv4f32.i32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, float, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_fvw_se_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv8f64.iXLen.nxv8f32.i32.iXLen(iXLen 1, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv8f64.iXLen.nxv8f32.i32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, float, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_fvw_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvw.nxv1f32.iXLen.nxv1f16.i16.iXLen(iXLen 1, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvw.nxv1f32.iXLen.nxv1f16.i16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, half, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_fvw_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvw.nxv2f32.iXLen.nxv2f16.i16.iXLen(iXLen 1, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvw.nxv2f32.iXLen.nxv2f16.i16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, half, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_fvw_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvw.nxv4f32.iXLen.nxv4f16.i16.iXLen(iXLen 1, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvw.nxv4f32.iXLen.nxv4f16.i16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, half, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_fvw_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvw.nxv8f32.iXLen.nxv8f16.i16.iXLen(iXLen 1, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvw.nxv8f32.iXLen.nxv8f16.i16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, half, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_fvw_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvw.nxv16f32.iXLen.nxv16f16.i16.iXLen(iXLen 1, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvw.nxv16f32.iXLen.nxv16f16.i16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, half, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_fvw_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvw.nxv1f64.iXLen.nxv1f32.i32.iXLen(iXLen 1, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvw.nxv1f64.iXLen.nxv1f32.i32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, float, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_fvw_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvw.nxv2f64.iXLen.nxv2f32.i32.iXLen(iXLen 1, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvw.nxv2f64.iXLen.nxv2f32.i32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, float, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_fvw_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvw.nxv4f64.iXLen.nxv4f32.i32.iXLen(iXLen 1, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvw.nxv4f64.iXLen.nxv4f32.i32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, float, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_fvw_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvw.nxv8f64.iXLen.nxv8f32.i32.iXLen(iXLen 1, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvw.nxv8f64.iXLen.nxv8f32.i32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, float, iXLen)


