[llvm-branch-commits] [clang] 53f7416 - [SiFive] Support C intrinsics for xsfvcp extension.

via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Mon Nov 14 23:54:10 PST 2022


Author: Nelson Chu
Date: 2022-11-14T23:51:20-08:00
New Revision: 53f74164eccec516994e4ba4824e2a8841702f88

URL: https://github.com/llvm/llvm-project/commit/53f74164eccec516994e4ba4824e2a8841702f88
DIFF: https://github.com/llvm/llvm-project/commit/53f74164eccec516994e4ba4824e2a8841702f88.diff

LOG: [SiFive] Support C intrinsics for xsfvcp extension.

There are two problems here.

The first problem is that the payloads are defined as "const int", so I
have to update our RISCVVEmitter to recognize "i", which will always be a
32-bit signed integer.

The other problem is that the sf.vc.x and sf.vc.i intrinsics don't have any
vector types among their inputs and outputs, but the intrinsic names still
need "Uv" as the suffix, otherwise we don't know how to add the vsetvli for
them.  The current RISCVVEmitter reports errors for this case; maybe we
should also update the emitter, but I figured that we could define
workarounds like the following,

sf_vc_x_se_u8m1  : RVVVCIXBuiltinSet<"", "0KiKiUvUe", "c", [0, 3]>;
sf_vc_x_se_u16m1 : RVVVCIXBuiltinSet<"", "0KiKiUvUe", "s", [0, 3]>;
sf_vc_x_se_u32m1 : RVVVCIXBuiltinSet<"", "0KiKiUvUe", "i", [0, 3]>;
sf_vc_x_se_u64m1 : RVVVCIXBuiltinSet<"", "0KiKiUvUe", "l", [0, 3]>;

I am not sure whether the solution is acceptable, but it works as expected.
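
For anyone decoding the prototype strings: "0" is a void return, "K" marks a
const qualifier, "i" is the new 32-bit int primary this patch adds, "Uv" is an
unsigned vector of the base type, and "Ue" is an unsigned scalar element.  A
minimal C-level usage sketch, mirroring the tests added below (the trailing vl
operand is presumably appended via the default HasVL):

  #include <riscv_vector.h>

  void demo(uint8_t rs1, size_t vl) {
    // Three const-int payload fields, then the scalar rs1 and vl.
    sf_vc_x_se_u8m1(/*p27_26=*/3, /*p24_20=*/0x1f, /*p11_7=*/0x1f, rs1, vl);
  }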

Architecture spec: https://docs.google.com/document/d/1t7KkKILmmQ0mAgnDLj1s2ks-UXkMR9cWOHuIsewovPE/edit

Intrinsic spec: https://github.com/sifive/rvv-intrinsic-internal/blob/master/rvv_intrinsic_gen/overloaded_intrinsic_funcs/11_sifive_custom_vector_extension_functions.md#vcix-instructions-xsfvc

Added: 
    clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-x-rv64.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-x.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-xv-rv64.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-xv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-xvv-rv64.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-xvv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-xvw.c

Modified: 
    clang/include/clang/Basic/riscv_vector.td
    clang/include/clang/Support/RISCVVIntrinsicUtils.h
    clang/lib/Support/RISCVVIntrinsicUtils.cpp

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 90e18df0403e0..c000b0dbd6d75 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -68,6 +68,8 @@
 //   t: ptrdiff_t, ignores "t"
 //   u: unsigned long, ignores "t"
 //   l: long, ignores "t"
+//   f: float32, ignores "t"
+//   i: int32, ignores "t"
 //
 // So for instance if t is "i", i.e. int, then "e" will yield int again. "v"
 // will yield an RVV vector type (assume LMUL=1), so __rvv_int32m1_t.
@@ -2390,3 +2392,91 @@ let HasMasked = false, HasVL = false, IRName = "" in {
     }
   }
 }
+
+class VCIXSuffix<string range> {
+  list<string> suffix = !cond(!eq(range, "c"): ["8mf8", "8mf4", "8mf2", "8m1", "8m2", "8m4", "8m8"],
+                              !eq(range, "s"): ["16mf4", "16mf2", "16m1", "16m2", "16m4", "16m8"],
+                              !eq(range, "i"): ["32mf2", "32m1", "32m2", "32m4", "32m8"],
+                              !eq(range, "l"): ["64m1", "64m2", "64m4", "64m8"]);
+}
+
+class VCIXBuiltinSet<string name, string IR_name, string suffix,
+                     string prototype, string type_range,
+                     list<int> intrinsic_types>
+    : RVVBuiltin<suffix, prototype, type_range> {
+  let Name = name;
+  let OverloadedName = name;
+  let IRName = IR_name;
+  let HasMasked = false;
+  let IntrinsicTypes = intrinsic_types;
+}
+
+multiclass VCIXBuiltinSet<string name, string IR_name, string suffix,
+                          string prototype, string type_range,
+                          list<int> intrinsic_types> {
+  if !find(prototype, "0") then {
+    def : VCIXBuiltinSet<name, IR_name, suffix, prototype, type_range, intrinsic_types>;
+  }
+  def : VCIXBuiltinSet<name # "_se", IR_name # "_se", suffix, prototype, type_range, intrinsic_types>;
+}
+
+multiclass RVVVCIXBuiltinSet<list<string> range, string prototype,
+                             list<int> intrinsic_types, bit EncodeVtype,
+                             bit UseGPR> {
+  defvar suffix = !if(EncodeVtype, "", "Uv");
+  foreach r = range in {
+    let RequiredFeatures = !if(!and(UseGPR, !eq(r, "l")),
+                               ["Xsfvcp", "RV64"], ["Xsfvcp"]) in {
+      // If EncodeVtype is true, then these intrinsics don't have any vector
+      // types in their outputs and inputs, but we still need to add a vsetvli
+      // for them.  So we encode different VTYPEs into the intrinsic names,
+      // and then we will know which vsetvli is correct.
+      if EncodeVtype then {
+        foreach s = VCIXSuffix<r>.suffix in {
+          defvar name = NAME # "_u" # s;
+          defvar IR_name = NAME # "_e" # s;
+          // Since we already encode the Vtype into the name, just set
+          // Log2LMUL to zero.  Otherwise the RISCVVEmitter would expand
+          // lots of redundant intrinsics with the same names.
+          let Log2LMUL = [0] in
+            def : VCIXBuiltinSet<name, IR_name, suffix, prototype, r, intrinsic_types>;
+        }
+      } else {
+        defm : VCIXBuiltinSet<NAME, NAME, suffix, prototype, r, intrinsic_types>;
+      }
+    }
+  }
+}
+
+let SupportOverloading = false in {
+  defm sf_vc_x_se  : RVVVCIXBuiltinSet<["c", "s", "i", "l"], "0KiKiKiUe", [3], /*EncodeVtype*/1, /*UseGPR*/1>;
+  defm sf_vc_i_se  : RVVVCIXBuiltinSet<["c", "s", "i", "l"], "0KiKiKiKi", [3], /*EncodeVtype*/1, /*UseGPR*/0>;
+  defm sf_vc_xv    : RVVVCIXBuiltinSet<["csi", "l"], "0KiKiUvUe",  [2, 3],  /*EncodeVtype*/0, /*UseGPR*/1>;
+  defm sf_vc_iv    : RVVVCIXBuiltinSet<["csi", "l"], "0KiKiUvKi",  [2, 3],  /*EncodeVtype*/0, /*UseGPR*/0>;
+  defm sf_vc_vv    : RVVVCIXBuiltinSet<["csi", "l"], "0KiKiUvUv",  [2, 3],  /*EncodeVtype*/0, /*UseGPR*/0>;
+  defm sf_vc_fv    : RVVVCIXBuiltinSet<["si",  "l"], "0KiKiUvFe",  [2, 3],  /*EncodeVtype*/0, /*UseGPR*/0>;
+  defm sf_vc_xvv   : RVVVCIXBuiltinSet<["csi", "l"], "0KiUvUvUe",  [1, 3],  /*EncodeVtype*/0, /*UseGPR*/1>;
+  defm sf_vc_ivv   : RVVVCIXBuiltinSet<["csi", "l"], "0KiUvUvKi",  [1, 3],  /*EncodeVtype*/0, /*UseGPR*/0>;
+  defm sf_vc_vvv   : RVVVCIXBuiltinSet<["csi", "l"], "0KiUvUvUv",  [1, 3],  /*EncodeVtype*/0, /*UseGPR*/0>;
+  defm sf_vc_fvv   : RVVVCIXBuiltinSet<["si",  "l"], "0KiUvUvFe",  [1, 3],  /*EncodeVtype*/0, /*UseGPR*/0>;
+  defm sf_vc_v_x   : RVVVCIXBuiltinSet<["csi", "l"], "UvKiKiUe",   [-1, 2], /*EncodeVtype*/0, /*UseGPR*/1>;
+  defm sf_vc_v_i   : RVVVCIXBuiltinSet<["csi", "l"], "UvKiKiKi",   [-1, 2], /*EncodeVtype*/0, /*UseGPR*/0>;
+  defm sf_vc_v_xv  : RVVVCIXBuiltinSet<["csi", "l"], "UvKiUvUe",   [-1, 2], /*EncodeVtype*/0, /*UseGPR*/1>;
+  defm sf_vc_v_iv  : RVVVCIXBuiltinSet<["csi", "l"], "UvKiUvKi",   [-1, 2], /*EncodeVtype*/0, /*UseGPR*/0>;
+  defm sf_vc_v_vv  : RVVVCIXBuiltinSet<["csi", "l"], "UvKiUvUv",   [-1, 2], /*EncodeVtype*/0, /*UseGPR*/0>;
+  defm sf_vc_v_fv  : RVVVCIXBuiltinSet<["si",  "l"], "UvKiUvFe",   [-1, 2], /*EncodeVtype*/0, /*UseGPR*/0>;
+  defm sf_vc_v_xvv : RVVVCIXBuiltinSet<["csi", "l"], "UvKiUvUvUe", [-1, 3], /*EncodeVtype*/0, /*UseGPR*/1>;
+  defm sf_vc_v_ivv : RVVVCIXBuiltinSet<["csi", "l"], "UvKiUvUvKi", [-1, 3], /*EncodeVtype*/0, /*UseGPR*/0>;
+  defm sf_vc_v_vvv : RVVVCIXBuiltinSet<["csi", "l"], "UvKiUvUvUv", [-1, 3], /*EncodeVtype*/0, /*UseGPR*/0>;
+  defm sf_vc_v_fvv : RVVVCIXBuiltinSet<["si",  "l"], "UvKiUvUvFe", [-1, 3], /*EncodeVtype*/0, /*UseGPR*/0>;
+  let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+  defm sf_vc_xvw   : RVVVCIXBuiltinSet<["csi"], "0KiUwUvUe",  [1, 2, 3],  /*EncodeVtype*/0, /*UseGPR*/1>;
+  defm sf_vc_ivw   : RVVVCIXBuiltinSet<["csi"], "0KiUwUvKi",  [1, 2, 3],  /*EncodeVtype*/0, /*UseGPR*/0>;
+  defm sf_vc_vvw   : RVVVCIXBuiltinSet<["csi"], "0KiUwUvUv",  [1, 2, 3],  /*EncodeVtype*/0, /*UseGPR*/0>;
+  defm sf_vc_fvw   : RVVVCIXBuiltinSet<["si"],  "0KiUwUvFe",  [1, 2, 3],  /*EncodeVtype*/0, /*UseGPR*/0>;
+  defm sf_vc_v_xvw : RVVVCIXBuiltinSet<["csi"], "UwKiUwUvUe", [-1, 2, 3], /*EncodeVtype*/0, /*UseGPR*/1>;
+  defm sf_vc_v_ivw : RVVVCIXBuiltinSet<["csi"], "UwKiUwUvKi", [-1, 2, 3], /*EncodeVtype*/0, /*UseGPR*/0>;
+  defm sf_vc_v_vvw : RVVVCIXBuiltinSet<["csi"], "UwKiUwUvUv", [-1, 2, 3], /*EncodeVtype*/0, /*UseGPR*/0>;
+  defm sf_vc_v_fvw : RVVVCIXBuiltinSet<["si"],  "UwKiUwUvFe", [-1, 2, 3], /*EncodeVtype*/0, /*UseGPR*/0>;
+  }
+}
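
As a quick illustration of what the definitions above generate: the "_se"
(side-effect) and plain variants both appear, lowering to IR intrinsics that
differ only by the ".se" name component, as the tests below confirm.  A hedged
sketch:

  #include <riscv_vector.h>

  vuint64m1_t demo(uint64_t rs1, size_t vl) {
    // Lowers to @llvm.riscv.sf.vc.v.x.se.nxv1i64.* (kept as side-effecting).
    vuint64m1_t a = sf_vc_v_x_se_u64m1(3, 0x1f, rs1, vl);
    // Lowers to @llvm.riscv.sf.vc.v.x.nxv1i64.* (no ".se").
    vuint64m1_t b = sf_vc_v_x_u64m1(3, 0x1f, rs1, vl);
    (void)b;
    return a;
  }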

diff --git a/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
index a0df9af0e941c..a55429de7f88c 100644
--- a/clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -56,6 +56,7 @@ enum class VectorTypeModifier : uint8_t {
   SFixedLog2LMUL1,
   SFixedLog2LMUL2,
   SFixedLog2LMUL3,
+  SignedInteger32,
 };
 
 // Similar to basic type but used to describe what's kind of type related to
@@ -162,6 +163,7 @@ enum ScalarTypeKind : uint8_t {
   Ptrdiff_t,
   UnsignedLong,
   SignedLong,
+  SignedInteger32,
   Boolean,
   SignedInteger,
   UnsignedInteger,

diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
index 87a89feb4b0d8..26e5d9ac9cfa4 100644
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -197,6 +197,9 @@ void RVVType::initBuiltinStr() {
       llvm_unreachable("Unhandled ElementBitwidth!");
     }
     break;
+  case ScalarTypeKind::SignedInteger32:
+    BuiltinStr += "i";
+    return;
   default:
     llvm_unreachable("ScalarType is invalid!");
   }
@@ -293,6 +296,9 @@ void RVVType::initTypeStr() {
     } else
       Str += getTypeString("float");
     break;
+  case ScalarTypeKind::SignedInteger32:
+    Str += "int";
+    return;
   case ScalarTypeKind::SignedInteger:
     Str += getTypeString("int");
     break;
@@ -398,6 +404,10 @@ Optional<PrototypeDescriptor> PrototypeDescriptor::parsePrototypeDescriptor(
     PT = BaseTypeModifier::Vector;
     VTM = VectorTypeModifier::MaskVector;
     break;
+  case 'i':
+    PT = BaseTypeModifier::Vector;
+    VTM = VectorTypeModifier::SignedInteger32;
+    break;
   case '0':
     PT = BaseTypeModifier::Void;
     break;
@@ -704,6 +714,11 @@ void RVVType::applyModifier(const PrototypeDescriptor &Transformer) {
     break;
   case VectorTypeModifier::NoModifier:
     break;
+  // For SiFive VCIX intrinsics that need a const integer for payloads.
+  case VectorTypeModifier::SignedInteger32:
+    ScalarType = ScalarTypeKind::SignedInteger32;
+    Scale = 0;
+    break;
   }
 
   for (unsigned TypeModifierMaskShift = 0;
@@ -1088,7 +1103,7 @@ void RVVIntrinsic::updateNamesAndPolicy(bool IsMasked, bool HasPolicy,
 
 SmallVector<PrototypeDescriptor> parsePrototypes(StringRef Prototypes) {
   SmallVector<PrototypeDescriptor> PrototypeDescriptors;
-  const StringRef Primaries("evwqom0ztul");
+  const StringRef Primaries("evwqom0ztulfi");
   while (!Prototypes.empty()) {
     size_t Idx = 0;
     // Skip over complex prototype because it could contain primitive type
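
To make the Primaries change concrete, here is an illustrative sketch (not the
actual clang code) of how parsePrototypes splits a prototype string: each
descriptor ends at the first primary character, and 'f'/'i' now count as
primaries too:

  #include <stdio.h>
  #include <string.h>

  int main(void) {
    const char *primaries = "evwqom0ztulfi"; // 'f' and 'i' newly recognized
    const char *proto = "0KiKiKiUe";         // e.g. the sf_vc_x_se prototype
    size_t start = 0;
    for (size_t i = 0; proto[i]; ++i) {
      if (strchr(primaries, proto[i])) {     // a primary ends one descriptor
        printf("%.*s\n", (int)(i - start + 1), proto + start);
        start = i + 1;                       // prints: 0, Ki, Ki, Ki, Ue
      }
    }
    return 0;
  }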

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-x-rv64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-x-rv64.c
new file mode 100644
index 0000000000000..954008ecc8295
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-x-rv64.c
@@ -0,0 +1,116 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+const int p27_26 = 3;
+const int p24_20 = 0x1f;
+const int p11_7 = 0x1f;
+
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e64m1.i64.i64(i32 3, i32 31, i32 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_u64m1(uint64_t rs1, size_t vl) {
+  sf_vc_x_se_u64m1(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e64m2.i64.i64(i32 3, i32 31, i32 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_u64m2(uint64_t rs1, size_t vl) {
+  sf_vc_x_se_u64m2(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e64m4.i64.i64(i32 3, i32 31, i32 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_u64m4(uint64_t rs1, size_t vl) {
+  sf_vc_x_se_u64m4(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e64m8.i64.i64(i32 3, i32 31, i32 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_u64m8(uint64_t rs1, size_t vl) {
+  sf_vc_x_se_u64m8(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.x.se.nxv1i64.i64.i64(i32 3, i32 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_x_se_u64m1(uint64_t rs1, size_t vl) {
+  return sf_vc_v_x_se_u64m1(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.x.se.nxv2i64.i64.i64(i32 3, i32 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_x_se_u64m2(uint64_t rs1, size_t vl) {
+  return sf_vc_v_x_se_u64m2(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.x.se.nxv4i64.i64.i64(i32 3, i32 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_x_se_u64m4(uint64_t rs1, size_t vl) {
+  return sf_vc_v_x_se_u64m4(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.x.se.nxv8i64.i64.i64(i32 3, i32 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_x_se_u64m8(uint64_t rs1, size_t vl) {
+  return sf_vc_v_x_se_u64m8(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.x.nxv1i64.i64.i64(i32 3, i32 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_x_u64m1(uint64_t rs1, size_t vl) {
+  return sf_vc_v_x_u64m1(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.x.nxv2i64.i64.i64(i32 3, i32 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_x_u64m2(uint64_t rs1, size_t vl) {
+  return sf_vc_v_x_u64m2(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.x.nxv4i64.i64.i64(i32 3, i32 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_x_u64m4(uint64_t rs1, size_t vl) {
+  return sf_vc_v_x_u64m4(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.x.nxv8i64.i64.i64(i32 3, i32 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_x_u64m8(uint64_t rs1, size_t vl) {
+  return sf_vc_v_x_u64m8(p27_26, p24_20, rs1, vl);
+}
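
Note that this first test file is RV64-only: per the RequiredFeatures logic in
the TableGen above, the u64 GPR-operand ("x") forms need the 64-bit rs1 scalar
to fit in a single GPR, so they require RV64 on top of Xsfvcp.  A minimal
sketch:

  #include <riscv_vector.h>

  // Only compiles with -target-feature +xsfvcp on a riscv64 triple.
  void demo_rv64_only(uint64_t rs1, size_t vl) {
    sf_vc_x_se_u64m1(3, 0x1f, 0x1f, rs1, vl);
  }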

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-x.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-x.c
new file mode 100644
index 0000000000000..d283c8036d382
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-x.c
@@ -0,0 +1,1690 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv32 -target-feature +v -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+const int p27_26 = 3;
+const int p24_20 = 0x1f;
+const int p11_7 = 0x1f;
+const int simm5 = 10;
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8mf8.i8.i32(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8mf8.i8.i64(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_u8mf8(uint8_t rs1, size_t vl) {
+  sf_vc_x_se_u8mf8(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8mf4.i8.i32(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8mf4.i8.i64(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_u8mf4(uint8_t rs1, size_t vl) {
+  sf_vc_x_se_u8mf4(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8mf2.i8.i32(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8mf2.i8.i64(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_u8mf2(uint8_t rs1, size_t vl) {
+  sf_vc_x_se_u8mf2(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8m1.i8.i32(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8m1.i8.i64(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_u8m1(uint8_t rs1, size_t vl) {
+  sf_vc_x_se_u8m1(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8m2.i8.i32(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8m2.i8.i64(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_u8m2(uint8_t rs1, size_t vl) {
+  sf_vc_x_se_u8m2(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8m4.i8.i32(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8m4.i8.i64(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_u8m4(uint8_t rs1, size_t vl) {
+  sf_vc_x_se_u8m4(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8m8.i8.i32(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8m8.i8.i64(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_u8m8(uint8_t rs1, size_t vl) {
+  sf_vc_x_se_u8m8(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16mf4.i16.i32(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16mf4.i16.i64(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_u16mf4(uint16_t rs1, size_t vl) {
+  sf_vc_x_se_u16mf4(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16mf2.i16.i32(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16mf2.i16.i64(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_u16mf2(uint16_t rs1, size_t vl) {
+  sf_vc_x_se_u16mf2(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16m1.i16.i32(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16m1.i16.i64(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_u16m1(uint16_t rs1, size_t vl) {
+  sf_vc_x_se_u16m1(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16m2.i16.i32(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16m2.i16.i64(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_u16m2(uint16_t rs1, size_t vl) {
+  sf_vc_x_se_u16m2(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16m4.i16.i32(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16m4.i16.i64(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_u16m4(uint16_t rs1, size_t vl) {
+  sf_vc_x_se_u16m4(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16m8.i16.i32(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16m8.i16.i64(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_u16m8(uint16_t rs1, size_t vl) {
+  sf_vc_x_se_u16m8(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32mf2.i32.i32(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32mf2.i32.i64(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_u32mf2(uint32_t rs1, size_t vl) {
+  sf_vc_x_se_u32mf2(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32m1.i32.i32(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32m1.i32.i64(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_u32m1(uint32_t rs1, size_t vl) {
+  sf_vc_x_se_u32m1(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32m2.i32.i32(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32m2.i32.i64(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_u32m2(uint32_t rs1, size_t vl) {
+  sf_vc_x_se_u32m2(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32m4.i32.i32(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32m4.i32.i64(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_u32m4(uint32_t rs1, size_t vl) {
+  sf_vc_x_se_u32m4(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_x_se_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32m8.i32.i32(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_x_se_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32m8.i32.i64(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_x_se_u32m8(uint32_t rs1, size_t vl) {
+  sf_vc_x_se_u32m8(p27_26, p24_20, p11_7, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.x.se.nxv1i8.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.x.se.nxv1i8.i8.i64(i32 3, i32 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vc_v_x_se_u8mf8(uint8_t rs1, size_t vl) {
+  return sf_vc_v_x_se_u8mf8(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.x.se.nxv2i8.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.x.se.nxv2i8.i8.i64(i32 3, i32 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vc_v_x_se_u8mf4(uint8_t rs1, size_t vl) {
+  return sf_vc_v_x_se_u8mf4(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.x.se.nxv4i8.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.x.se.nxv4i8.i8.i64(i32 3, i32 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vc_v_x_se_u8mf2(uint8_t rs1, size_t vl) {
+  return sf_vc_v_x_se_u8mf2(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.x.se.nxv8i8.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.x.se.nxv8i8.i8.i64(i32 3, i32 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vc_v_x_se_u8m1(uint8_t rs1, size_t vl) {
+  return sf_vc_v_x_se_u8m1(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.x.se.nxv16i8.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.x.se.nxv16i8.i8.i64(i32 3, i32 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vc_v_x_se_u8m2(uint8_t rs1, size_t vl) {
+  return sf_vc_v_x_se_u8m2(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.x.se.nxv32i8.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.x.se.nxv32i8.i8.i64(i32 3, i32 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_sf_vc_v_x_se_u8m4(uint8_t rs1, size_t vl) {
+  return sf_vc_v_x_se_u8m4(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.x.se.nxv64i8.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.x.se.nxv64i8.i8.i64(i32 3, i32 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_sf_vc_v_x_se_u8m8(uint8_t rs1, size_t vl) {
+  return sf_vc_v_x_se_u8m8(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.x.se.nxv1i16.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.x.se.nxv1i16.i16.i64(i32 3, i32 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_x_se_u16mf4(uint16_t rs1, size_t vl) {
+  return sf_vc_v_x_se_u16mf4(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.x.se.nxv2i16.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.x.se.nxv2i16.i16.i64(i32 3, i32 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_x_se_u16mf2(uint16_t rs1, size_t vl) {
+  return sf_vc_v_x_se_u16mf2(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.x.se.nxv4i16.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.x.se.nxv4i16.i16.i64(i32 3, i32 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_x_se_u16m1(uint16_t rs1, size_t vl) {
+  return sf_vc_v_x_se_u16m1(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.x.se.nxv8i16.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.x.se.nxv8i16.i16.i64(i32 3, i32 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_x_se_u16m2(uint16_t rs1, size_t vl) {
+  return sf_vc_v_x_se_u16m2(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.x.se.nxv16i16.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.x.se.nxv16i16.i16.i64(i32 3, i32 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_x_se_u16m4(uint16_t rs1, size_t vl) {
+  return sf_vc_v_x_se_u16m4(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.x.se.nxv32i16.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.x.se.nxv32i16.i16.i64(i32 3, i32 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_x_se_u16m8(uint16_t rs1, size_t vl) {
+  return sf_vc_v_x_se_u16m8(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.x.se.nxv1i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.x.se.nxv1i32.i32.i64(i32 3, i32 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_x_se_u32mf2(uint32_t rs1, size_t vl) {
+  return sf_vc_v_x_se_u32mf2(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.x.se.nxv2i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.x.se.nxv2i32.i32.i64(i32 3, i32 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_x_se_u32m1(uint32_t rs1, size_t vl) {
+  return sf_vc_v_x_se_u32m1(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.x.se.nxv4i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.x.se.nxv4i32.i32.i64(i32 3, i32 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_x_se_u32m2(uint32_t rs1, size_t vl) {
+  return sf_vc_v_x_se_u32m2(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.x.se.nxv8i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.x.se.nxv8i32.i32.i64(i32 3, i32 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_x_se_u32m4(uint32_t rs1, size_t vl) {
+  return sf_vc_v_x_se_u32m4(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.x.se.nxv16i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.x.se.nxv16i32.i32.i64(i32 3, i32 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_x_se_u32m8(uint32_t rs1, size_t vl) {
+  return sf_vc_v_x_se_u32m8(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.x.nxv1i8.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.x.nxv1i8.i8.i64(i32 3, i32 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vc_v_x_u8mf8(uint8_t rs1, size_t vl) {
+  return sf_vc_v_x_u8mf8(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.x.nxv2i8.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.x.nxv2i8.i8.i64(i32 3, i32 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vc_v_x_u8mf4(uint8_t rs1, size_t vl) {
+  return sf_vc_v_x_u8mf4(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.x.nxv4i8.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.x.nxv4i8.i8.i64(i32 3, i32 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vc_v_x_u8mf2(uint8_t rs1, size_t vl) {
+  return sf_vc_v_x_u8mf2(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.x.nxv8i8.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.x.nxv8i8.i8.i64(i32 3, i32 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vc_v_x_u8m1(uint8_t rs1, size_t vl) {
+  return sf_vc_v_x_u8m1(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.x.nxv16i8.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.x.nxv16i8.i8.i64(i32 3, i32 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vc_v_x_u8m2(uint8_t rs1, size_t vl) {
+  return sf_vc_v_x_u8m2(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.x.nxv32i8.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.x.nxv32i8.i8.i64(i32 3, i32 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_sf_vc_v_x_u8m4(uint8_t rs1, size_t vl) {
+  return sf_vc_v_x_u8m4(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.x.nxv64i8.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.x.nxv64i8.i8.i64(i32 3, i32 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_sf_vc_v_x_u8m8(uint8_t rs1, size_t vl) {
+  return sf_vc_v_x_u8m8(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.x.nxv1i16.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.x.nxv1i16.i16.i64(i32 3, i32 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_x_u16mf4(uint16_t rs1, size_t vl) {
+  return sf_vc_v_x_u16mf4(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.x.nxv2i16.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.x.nxv2i16.i16.i64(i32 3, i32 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_x_u16mf2(uint16_t rs1, size_t vl) {
+  return sf_vc_v_x_u16mf2(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.x.nxv4i16.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.x.nxv4i16.i16.i64(i32 3, i32 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_x_u16m1(uint16_t rs1, size_t vl) {
+  return sf_vc_v_x_u16m1(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.x.nxv8i16.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.x.nxv8i16.i16.i64(i32 3, i32 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_x_u16m2(uint16_t rs1, size_t vl) {
+  return sf_vc_v_x_u16m2(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.x.nxv16i16.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.x.nxv16i16.i16.i64(i32 3, i32 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_x_u16m4(uint16_t rs1, size_t vl) {
+  return sf_vc_v_x_u16m4(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.x.nxv32i16.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.x.nxv32i16.i16.i64(i32 3, i32 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_x_u16m8(uint16_t rs1, size_t vl) {
+  return sf_vc_v_x_u16m8(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.x.nxv1i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.x.nxv1i32.i32.i64(i32 3, i32 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_x_u32mf2(uint32_t rs1, size_t vl) {
+  return sf_vc_v_x_u32mf2(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.x.nxv2i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.x.nxv2i32.i32.i64(i32 3, i32 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_x_u32m1(uint32_t rs1, size_t vl) {
+  return sf_vc_v_x_u32m1(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.x.nxv4i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.x.nxv4i32.i32.i64(i32 3, i32 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_x_u32m2(uint32_t rs1, size_t vl) {
+  return sf_vc_v_x_u32m2(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.x.nxv8i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.x.nxv8i32.i32.i64(i32 3, i32 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_x_u32m4(uint32_t rs1, size_t vl) {
+  return sf_vc_v_x_u32m4(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_x_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.x.nxv16i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_x_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.x.nxv16i32.i32.i64(i32 3, i32 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_x_u32m8(uint32_t rs1, size_t vl) {
+  return sf_vc_v_x_u32m8(p27_26, p24_20, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8mf8.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8mf8.i32.i64(i32 3, i32 31, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_u8mf8(size_t vl) {
+  sf_vc_i_se_u8mf8(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8mf4.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8mf4.i32.i64(i32 3, i32 31, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_u8mf4(size_t vl) {
+  sf_vc_i_se_u8mf4(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8mf2.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8mf2.i32.i64(i32 3, i32 31, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_u8mf2(size_t vl) {
+  sf_vc_i_se_u8mf2(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8m1.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8m1.i32.i64(i32 3, i32 31, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_u8m1(size_t vl) {
+  sf_vc_i_se_u8m1(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8m2.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8m2.i32.i64(i32 3, i32 31, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_u8m2(size_t vl) {
+  sf_vc_i_se_u8m2(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8m4.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8m4.i32.i64(i32 3, i32 31, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_u8m4(size_t vl) {
+  sf_vc_i_se_u8m4(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8m8.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8m8.i32.i64(i32 3, i32 31, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_u8m8(size_t vl) {
+  sf_vc_i_se_u8m8(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16mf4.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16mf4.i32.i64(i32 3, i32 31, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_u16mf4(size_t vl) {
+  sf_vc_i_se_u16mf4(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16mf2.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16mf2.i32.i64(i32 3, i32 31, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_u16mf2(size_t vl) {
+  sf_vc_i_se_u16mf2(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16m1.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16m1.i32.i64(i32 3, i32 31, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_u16m1(size_t vl) {
+  sf_vc_i_se_u16m1(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16m2.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16m2.i32.i64(i32 3, i32 31, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_u16m2(size_t vl) {
+  sf_vc_i_se_u16m2(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16m4.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16m4.i32.i64(i32 3, i32 31, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_u16m4(size_t vl) {
+  sf_vc_i_se_u16m4(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16m8.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16m8.i32.i64(i32 3, i32 31, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_u16m8(size_t vl) {
+  sf_vc_i_se_u16m8(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32mf2.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32mf2.i32.i64(i32 3, i32 31, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_u32mf2(size_t vl) {
+  sf_vc_i_se_u32mf2(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32m1.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32m1.i32.i64(i32 3, i32 31, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_u32m1(size_t vl) {
+  sf_vc_i_se_u32m1(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32m2.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32m2.i32.i64(i32 3, i32 31, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_u32m2(size_t vl) {
+  sf_vc_i_se_u32m2(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32m4.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32m4.i32.i64(i32 3, i32 31, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_u32m4(size_t vl) {
+  sf_vc_i_se_u32m4(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32m8.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32m8.i32.i64(i32 3, i32 31, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_u32m8(size_t vl) {
+  sf_vc_i_se_u32m8(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e64m1.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e64m1.i32.i64(i32 3, i32 31, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_u64m1(size_t vl) {
+  sf_vc_i_se_u64m1(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e64m2.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e64m2.i32.i64(i32 3, i32 31, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_u64m2(size_t vl) {
+  sf_vc_i_se_u64m2(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e64m4.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e64m4.i32.i64(i32 3, i32 31, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_u64m4(size_t vl) {
+  sf_vc_i_se_u64m4(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_i_se_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e64m8.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_i_se_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e64m8.i32.i64(i32 3, i32 31, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_i_se_u64m8(size_t vl) {
+  sf_vc_i_se_u64m8(p27_26, p24_20, p11_7, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.i.se.nxv1i8.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.i.se.nxv1i8.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vc_v_i_se_u8mf8(size_t vl) {
+  return sf_vc_v_i_se_u8mf8(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.i.se.nxv2i8.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.i.se.nxv2i8.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vc_v_i_se_u8mf4(size_t vl) {
+  return sf_vc_v_i_se_u8mf4(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.i.se.nxv4i8.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.i.se.nxv4i8.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vc_v_i_se_u8mf2(size_t vl) {
+  return sf_vc_v_i_se_u8mf2(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.i.se.nxv8i8.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.i.se.nxv8i8.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vc_v_i_se_u8m1(size_t vl) {
+  return sf_vc_v_i_se_u8m1(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.i.se.nxv16i8.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.i.se.nxv16i8.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vc_v_i_se_u8m2(size_t vl) {
+  return sf_vc_v_i_se_u8m2(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.i.se.nxv32i8.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.i.se.nxv32i8.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_sf_vc_v_i_se_u8m4(size_t vl) {
+  return sf_vc_v_i_se_u8m4(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.i.se.nxv64i8.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.i.se.nxv64i8.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_sf_vc_v_i_se_u8m8(size_t vl) {
+  return sf_vc_v_i_se_u8m8(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.i.se.nxv1i16.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.i.se.nxv1i16.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_i_se_u16mf4(size_t vl) {
+  return sf_vc_v_i_se_u16mf4(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.i.se.nxv2i16.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.i.se.nxv2i16.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_i_se_u16mf2(size_t vl) {
+  return sf_vc_v_i_se_u16mf2(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.i.se.nxv4i16.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.i.se.nxv4i16.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_i_se_u16m1(size_t vl) {
+  return sf_vc_v_i_se_u16m1(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.i.se.nxv8i16.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.i.se.nxv8i16.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_i_se_u16m2(size_t vl) {
+  return sf_vc_v_i_se_u16m2(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.i.se.nxv16i16.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.i.se.nxv16i16.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_i_se_u16m4(size_t vl) {
+  return sf_vc_v_i_se_u16m4(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.i.se.nxv32i16.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.i.se.nxv32i16.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_i_se_u16m8(size_t vl) {
+  return sf_vc_v_i_se_u16m8(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.i.se.nxv1i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.i.se.nxv1i32.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_i_se_u32mf2(size_t vl) {
+  return sf_vc_v_i_se_u32mf2(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.i.se.nxv2i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.i.se.nxv2i32.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_i_se_u32m1(size_t vl) {
+  return sf_vc_v_i_se_u32m1(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.i.se.nxv4i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.i.se.nxv4i32.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_i_se_u32m2(size_t vl) {
+  return sf_vc_v_i_se_u32m2(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.i.se.nxv8i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.i.se.nxv8i32.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_i_se_u32m4(size_t vl) {
+  return sf_vc_v_i_se_u32m4(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.i.se.nxv16i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.i.se.nxv16i32.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_i_se_u32m8(size_t vl) {
+  return sf_vc_v_i_se_u32m8(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.i.se.nxv1i64.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.i.se.nxv1i64.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_i_se_u64m1(size_t vl) {
+  return sf_vc_v_i_se_u64m1(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.i.se.nxv2i64.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.i.se.nxv2i64.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_i_se_u64m2(size_t vl) {
+  return sf_vc_v_i_se_u64m2(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.i.se.nxv4i64.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.i.se.nxv4i64.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_i_se_u64m4(size_t vl) {
+  return sf_vc_v_i_se_u64m4(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.i.se.nxv8i64.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.i.se.nxv8i64.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_i_se_u64m8(size_t vl) {
+  return sf_vc_v_i_se_u64m8(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.i.nxv1i8.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.i.nxv1i8.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vc_v_i_u8mf8(size_t vl) {
+  return sf_vc_v_i_u8mf8(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.i.nxv2i8.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.i.nxv2i8.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vc_v_i_u8mf4(size_t vl) {
+  return sf_vc_v_i_u8mf4(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.i.nxv4i8.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.i.nxv4i8.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vc_v_i_u8mf2(size_t vl) {
+  return sf_vc_v_i_u8mf2(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.i.nxv8i8.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.i.nxv8i8.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vc_v_i_u8m1(size_t vl) {
+  return sf_vc_v_i_u8m1(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.i.nxv16i8.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.i.nxv16i8.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vc_v_i_u8m2(size_t vl) {
+  return sf_vc_v_i_u8m2(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.i.nxv32i8.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.i.nxv32i8.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_sf_vc_v_i_u8m4(size_t vl) {
+  return sf_vc_v_i_u8m4(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.i.nxv64i8.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.i.nxv64i8.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_sf_vc_v_i_u8m8(size_t vl) {
+  return sf_vc_v_i_u8m8(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.i.nxv1i16.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.i.nxv1i16.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_i_u16mf4(size_t vl) {
+  return sf_vc_v_i_u16mf4(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.i.nxv2i16.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.i.nxv2i16.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_i_u16mf2(size_t vl) {
+  return sf_vc_v_i_u16mf2(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.i.nxv4i16.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.i.nxv4i16.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_i_u16m1(size_t vl) {
+  return sf_vc_v_i_u16m1(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.i.nxv8i16.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.i.nxv8i16.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_i_u16m2(size_t vl) {
+  return sf_vc_v_i_u16m2(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.i.nxv16i16.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.i.nxv16i16.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_i_u16m4(size_t vl) {
+  return sf_vc_v_i_u16m4(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.i.nxv32i16.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.i.nxv32i16.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_i_u16m8(size_t vl) {
+  return sf_vc_v_i_u16m8(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.i.nxv1i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.i.nxv1i32.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_i_u32mf2(size_t vl) {
+  return sf_vc_v_i_u32mf2(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.i.nxv2i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.i.nxv2i32.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_i_u32m1(size_t vl) {
+  return sf_vc_v_i_u32m1(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.i.nxv4i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.i.nxv4i32.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_i_u32m2(size_t vl) {
+  return sf_vc_v_i_u32m2(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.i.nxv8i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.i.nxv8i32.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_i_u32m4(size_t vl) {
+  return sf_vc_v_i_u32m4(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.i.nxv16i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.i.nxv16i32.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_i_u32m8(size_t vl) {
+  return sf_vc_v_i_u32m8(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.i.nxv1i64.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.i.nxv1i64.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_i_u64m1(size_t vl) {
+  return sf_vc_v_i_u64m1(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.i.nxv2i64.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.i.nxv2i64.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_i_u64m2(size_t vl) {
+  return sf_vc_v_i_u64m2(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.i.nxv4i64.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.i.nxv4i64.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_i_u64m4(size_t vl) {
+  return sf_vc_v_i_u64m4(p27_26, p24_20, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_i_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.i.nxv8i64.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_i_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.i.nxv8i64.i32.i64(i32 3, i32 31, i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_i_u64m8(size_t vl) {
+  return sf_vc_v_i_u64m8(p27_26, p24_20, simm5, vl);
+}
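
For reference (not part of the patch), the immediate-only intrinsics tested
above could be driven with a minimal sketch like the following; the payload
values 3, 31 and 10 are illustrative and, as in the tests, must be
compile-time constants:

    #include <riscv_vector.h>

    /* Hypothetical wrapper: issue a selected-element sf.vc.v.i and return
       the produced vector. Here 3 fills the opcode field (bits 27-26),
       31 the vs2 field (bits 24-20), and 10 is the signed 5-bit
       immediate payload. */
    vuint32m1_t issue_vcix_imm(size_t vl) {
      return sf_vc_v_i_se_u32m1(3, 31, 10, vl);
    }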

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-xv-rv64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-xv-rv64.c
new file mode 100644
index 0000000000000..d865d0c0b172e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-xv-rv64.c
@@ -0,0 +1,116 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+const int p27_26 = 3;
+const int p11_7 = 0x1f;
+
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv1i64.i64.i64(i32 3, i32 31, <vscale x 1 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  sf_vc_xv_se_u64m1(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv2i64.i64.i64(i32 3, i32 31, <vscale x 2 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  sf_vc_xv_se_u64m2(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv4i64.i64.i64(i32 3, i32 31, <vscale x 4 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  sf_vc_xv_se_u64m4(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv8i64.i64.i64(i32 3, i32 31, <vscale x 8 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  sf_vc_xv_se_u64m8(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv1i64.i64.i64(i32 3, <vscale x 1 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_xv_se_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return sf_vc_v_xv_se_u64m1(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv2i64.i64.i64(i32 3, <vscale x 2 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_xv_se_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return sf_vc_v_xv_se_u64m2(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv4i64.i64.i64(i32 3, <vscale x 4 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_xv_se_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return sf_vc_v_xv_se_u64m4(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv8i64.i64.i64(i32 3, <vscale x 8 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_xv_se_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return sf_vc_v_xv_se_u64m8(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xv.nxv1i64.i64.i64(i32 3, <vscale x 1 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_xv_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return sf_vc_v_xv_u64m1(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xv.nxv2i64.i64.i64(i32 3, <vscale x 2 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_xv_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return sf_vc_v_xv_u64m2(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xv.nxv4i64.i64.i64(i32 3, <vscale x 4 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_xv_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return sf_vc_v_xv_u64m4(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xv.nxv8i64.i64.i64(i32 3, <vscale x 8 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_xv_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return sf_vc_v_xv_u64m8(p27_26, vs2, rs1, vl);
+}
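
For reference (not part of the patch), the scalar-operand form tested above
combines a live vector source with a GPR payload; a minimal usage sketch,
assuming the intrinsic names emitted for this patch:

    #include <riscv_vector.h>

    /* Hypothetical wrapper: pass each element of vs2 together with the
       scalar rs1 to the custom unit via sf.vc.v.xv (selected-element
       form); 3 is the illustrative opcode field (bits 27-26). */
    vuint64m1_t issue_vcix_xv(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
      return sf_vc_v_xv_se_u64m1(3, vs2, rs1, vl);
    }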

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-xv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-xv.c
new file mode 100644
index 0000000000000..56a0f69209df6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-xv.c
@@ -0,0 +1,3244 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv32 -target-feature +v -target-feature +zfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+const int p27_26 = 3;
+const int p27 = 1;
+const int p11_7 = 0x1f;
+const int simm5 = 10;
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv1i8.nxv1i8.i32(i32 3, i32 31, <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv1i8.nxv1i8.i64(i32 3, i32 31, <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  sf_vc_vv_se_u8mf8(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv2i8.nxv2i8.i32(i32 3, i32 31, <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv2i8.nxv2i8.i64(i32 3, i32 31, <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  sf_vc_vv_se_u8mf4(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv4i8.nxv4i8.i32(i32 3, i32 31, <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv4i8.nxv4i8.i64(i32 3, i32 31, <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  sf_vc_vv_se_u8mf2(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv8i8.nxv8i8.i32(i32 3, i32 31, <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv8i8.nxv8i8.i64(i32 3, i32 31, <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  sf_vc_vv_se_u8m1(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv16i8.nxv16i8.i32(i32 3, i32 31, <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv16i8.nxv16i8.i64(i32 3, i32 31, <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  sf_vc_vv_se_u8m2(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv32i8.nxv32i8.i32(i32 3, i32 31, <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv32i8.nxv32i8.i64(i32 3, i32 31, <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  sf_vc_vv_se_u8m4(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv64i8.nxv64i8.i32(i32 3, i32 31, <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv64i8.nxv64i8.i64(i32 3, i32 31, <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  sf_vc_vv_se_u8m8(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv1i16.nxv1i16.i32(i32 3, i32 31, <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv1i16.nxv1i16.i64(i32 3, i32 31, <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  sf_vc_vv_se_u16mf4(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv2i16.nxv2i16.i32(i32 3, i32 31, <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv2i16.nxv2i16.i64(i32 3, i32 31, <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  sf_vc_vv_se_u16mf2(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv4i16.nxv4i16.i32(i32 3, i32 31, <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv4i16.nxv4i16.i64(i32 3, i32 31, <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  sf_vc_vv_se_u16m1(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv8i16.nxv8i16.i32(i32 3, i32 31, <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv8i16.nxv8i16.i64(i32 3, i32 31, <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  sf_vc_vv_se_u16m2(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv16i16.nxv16i16.i32(i32 3, i32 31, <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv16i16.nxv16i16.i64(i32 3, i32 31, <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  sf_vc_vv_se_u16m4(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv32i16.nxv32i16.i32(i32 3, i32 31, <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv32i16.nxv32i16.i64(i32 3, i32 31, <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  sf_vc_vv_se_u16m8(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv1i32.nxv1i32.i32(i32 3, i32 31, <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv1i32.nxv1i32.i64(i32 3, i32 31, <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  sf_vc_vv_se_u32mf2(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv2i32.nxv2i32.i32(i32 3, i32 31, <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv2i32.nxv2i32.i64(i32 3, i32 31, <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  sf_vc_vv_se_u32m1(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv4i32.nxv4i32.i32(i32 3, i32 31, <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv4i32.nxv4i32.i64(i32 3, i32 31, <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  sf_vc_vv_se_u32m2(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv8i32.nxv8i32.i32(i32 3, i32 31, <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv8i32.nxv8i32.i64(i32 3, i32 31, <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  sf_vc_vv_se_u32m4(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv16i32.nxv16i32.i32(i32 3, i32 31, <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv16i32.nxv16i32.i64(i32 3, i32 31, <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  sf_vc_vv_se_u32m8(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv1i64.nxv1i64.i32(i32 3, i32 31, <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv1i64.nxv1i64.i64(i32 3, i32 31, <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  sf_vc_vv_se_u64m1(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv2i64.nxv2i64.i32(i32 3, i32 31, <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv2i64.nxv2i64.i64(i32 3, i32 31, <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  sf_vc_vv_se_u64m2(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv4i64.nxv4i64.i32(i32 3, i32 31, <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv4i64.nxv4i64.i64(i32 3, i32 31, <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  sf_vc_vv_se_u64m4(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vv_se_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv8i64.nxv8i64.i32(i32 3, i32 31, <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vv_se_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vv.se.nxv8i64.nxv8i64.i64(i32 3, i32 31, <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vv_se_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  sf_vc_vv_se_u64m8(p27_26, p11_7, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv1i8.nxv1i8.i32(i32 3, <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv1i8.nxv1i8.i64(i32 3, <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vc_v_vv_se_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return sf_vc_v_vv_se_u8mf8(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv2i8.nxv2i8.i32(i32 3, <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv2i8.nxv2i8.i64(i32 3, <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vc_v_vv_se_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return sf_vc_v_vv_se_u8mf4(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv4i8.nxv4i8.i32(i32 3, <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv4i8.nxv4i8.i64(i32 3, <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vc_v_vv_se_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return sf_vc_v_vv_se_u8mf2(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv8i8.nxv8i8.i32(i32 3, <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv8i8.nxv8i8.i64(i32 3, <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vc_v_vv_se_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return sf_vc_v_vv_se_u8m1(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv16i8.nxv16i8.i32(i32 3, <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv16i8.nxv16i8.i64(i32 3, <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vc_v_vv_se_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return sf_vc_v_vv_se_u8m2(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv32i8.nxv32i8.i32(i32 3, <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv32i8.nxv32i8.i64(i32 3, <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_sf_vc_v_vv_se_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return sf_vc_v_vv_se_u8m4(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv64i8.nxv64i8.i32(i32 3, <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv64i8.nxv64i8.i64(i32 3, <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_sf_vc_v_vv_se_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return sf_vc_v_vv_se_u8m8(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv1i16.nxv1i16.i32(i32 3, <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv1i16.nxv1i16.i64(i32 3, <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_vv_se_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return sf_vc_v_vv_se_u16mf4(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv2i16.nxv2i16.i32(i32 3, <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv2i16.nxv2i16.i64(i32 3, <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_vv_se_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return sf_vc_v_vv_se_u16mf2(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv4i16.nxv4i16.i32(i32 3, <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv4i16.nxv4i16.i64(i32 3, <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_vv_se_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return sf_vc_v_vv_se_u16m1(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv8i16.nxv8i16.i32(i32 3, <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv8i16.nxv8i16.i64(i32 3, <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_vv_se_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return sf_vc_v_vv_se_u16m2(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv16i16.nxv16i16.i32(i32 3, <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv16i16.nxv16i16.i64(i32 3, <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_vv_se_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return sf_vc_v_vv_se_u16m4(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv32i16.nxv32i16.i32(i32 3, <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv32i16.nxv32i16.i64(i32 3, <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_vv_se_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return sf_vc_v_vv_se_u16m8(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv1i32.nxv1i32.i32(i32 3, <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv1i32.nxv1i32.i64(i32 3, <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_vv_se_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return sf_vc_v_vv_se_u32mf2(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv2i32.nxv2i32.i32(i32 3, <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv2i32.nxv2i32.i64(i32 3, <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_vv_se_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return sf_vc_v_vv_se_u32m1(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv4i32.nxv4i32.i32(i32 3, <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv4i32.nxv4i32.i64(i32 3, <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_vv_se_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return sf_vc_v_vv_se_u32m2(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv8i32.nxv8i32.i32(i32 3, <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv8i32.nxv8i32.i64(i32 3, <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_vv_se_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return sf_vc_v_vv_se_u32m4(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv16i32.nxv16i32.i32(i32 3, <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv16i32.nxv16i32.i64(i32 3, <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_vv_se_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return sf_vc_v_vv_se_u32m8(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv1i64.nxv1i64.i32(i32 3, <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv1i64.nxv1i64.i64(i32 3, <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_vv_se_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return sf_vc_v_vv_se_u64m1(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv2i64.nxv2i64.i32(i32 3, <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv2i64.nxv2i64.i64(i32 3, <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_vv_se_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return sf_vc_v_vv_se_u64m2(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv4i64.nxv4i64.i32(i32 3, <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv4i64.nxv4i64.i64(i32 3, <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_vv_se_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return sf_vc_v_vv_se_u64m4(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv8i64.nxv8i64.i32(i32 3, <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv8i64.nxv8i64.i64(i32 3, <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_vv_se_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return sf_vc_v_vv_se_u64m8(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.nxv1i8.nxv1i8.i32(i32 3, <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.nxv1i8.nxv1i8.i64(i32 3, <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vc_v_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return sf_vc_v_vv_u8mf8(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vv.nxv2i8.nxv2i8.i32(i32 3, <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vv.nxv2i8.nxv2i8.i64(i32 3, <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vc_v_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return sf_vc_v_vv_u8mf4(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vv.nxv4i8.nxv4i8.i32(i32 3, <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vv.nxv4i8.nxv4i8.i64(i32 3, <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vc_v_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return sf_vc_v_vv_u8mf2(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vv.nxv8i8.nxv8i8.i32(i32 3, <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vv.nxv8i8.nxv8i8.i64(i32 3, <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vc_v_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return sf_vc_v_vv_u8m1(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vv.nxv16i8.nxv16i8.i32(i32 3, <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vv.nxv16i8.nxv16i8.i64(i32 3, <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vc_v_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return sf_vc_v_vv_u8m2(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vv.nxv32i8.nxv32i8.i32(i32 3, <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vv.nxv32i8.nxv32i8.i64(i32 3, <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_sf_vc_v_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return sf_vc_v_vv_u8m4(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vv.nxv64i8.nxv64i8.i32(i32 3, <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vv.nxv64i8.nxv64i8.i64(i32 3, <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_sf_vc_v_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return sf_vc_v_vv_u8m8(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vv.nxv1i16.nxv1i16.i32(i32 3, <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vv.nxv1i16.nxv1i16.i64(i32 3, <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return sf_vc_v_vv_u16mf4(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vv.nxv2i16.nxv2i16.i32(i32 3, <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vv.nxv2i16.nxv2i16.i64(i32 3, <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return sf_vc_v_vv_u16mf2(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vv.nxv4i16.nxv4i16.i32(i32 3, <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vv.nxv4i16.nxv4i16.i64(i32 3, <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return sf_vc_v_vv_u16m1(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vv.nxv8i16.nxv8i16.i32(i32 3, <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vv.nxv8i16.nxv8i16.i64(i32 3, <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return sf_vc_v_vv_u16m2(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vv.nxv16i16.nxv16i16.i32(i32 3, <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vv.nxv16i16.nxv16i16.i64(i32 3, <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return sf_vc_v_vv_u16m4(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vv.nxv32i16.nxv32i16.i32(i32 3, <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vv.nxv32i16.nxv32i16.i64(i32 3, <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return sf_vc_v_vv_u16m8(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vv.nxv1i32.nxv1i32.i32(i32 3, <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vv.nxv1i32.nxv1i32.i64(i32 3, <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return sf_vc_v_vv_u32mf2(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vv.nxv2i32.nxv2i32.i32(i32 3, <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vv.nxv2i32.nxv2i32.i64(i32 3, <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return sf_vc_v_vv_u32m1(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.nxv4i32.nxv4i32.i32(i32 3, <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.nxv4i32.nxv4i32.i64(i32 3, <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return sf_vc_v_vv_u32m2(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vv.nxv8i32.nxv8i32.i32(i32 3, <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vv.nxv8i32.nxv8i32.i64(i32 3, <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return sf_vc_v_vv_u32m4(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vv.nxv16i32.nxv16i32.i32(i32 3, <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vv.nxv16i32.nxv16i32.i64(i32 3, <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return sf_vc_v_vv_u32m8(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vv.nxv1i64.nxv1i64.i32(i32 3, <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vv.nxv1i64.nxv1i64.i64(i32 3, <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return sf_vc_v_vv_u64m1(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vv.nxv2i64.nxv2i64.i32(i32 3, <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vv.nxv2i64.nxv2i64.i64(i32 3, <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return sf_vc_v_vv_u64m2(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vv.nxv4i64.nxv4i64.i32(i32 3, <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vv.nxv4i64.nxv4i64.i64(i32 3, <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return sf_vc_v_vv_u64m4(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vv_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.nxv8i64.nxv8i64.i32(i32 3, <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vv_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.nxv8i64.nxv8i64.i64(i32 3, <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return sf_vc_v_vv_u64m8(p27_26, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv1i8.i8.i32(i32 3, i32 31, <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv1i8.i8.i64(i32 3, i32 31, <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  sf_vc_xv_se_u8mf8(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv2i8.i8.i32(i32 3, i32 31, <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv2i8.i8.i64(i32 3, i32 31, <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  sf_vc_xv_se_u8mf4(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv4i8.i8.i32(i32 3, i32 31, <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv4i8.i8.i64(i32 3, i32 31, <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  sf_vc_xv_se_u8mf2(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv8i8.i8.i32(i32 3, i32 31, <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv8i8.i8.i64(i32 3, i32 31, <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  sf_vc_xv_se_u8m1(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv16i8.i8.i32(i32 3, i32 31, <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv16i8.i8.i64(i32 3, i32 31, <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  sf_vc_xv_se_u8m2(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv32i8.i8.i32(i32 3, i32 31, <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv32i8.i8.i64(i32 3, i32 31, <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  sf_vc_xv_se_u8m4(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv64i8.i8.i32(i32 3, i32 31, <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv64i8.i8.i64(i32 3, i32 31, <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  sf_vc_xv_se_u8m8(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv1i16.i16.i32(i32 3, i32 31, <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv1i16.i16.i64(i32 3, i32 31, <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  sf_vc_xv_se_u16mf4(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv2i16.i16.i32(i32 3, i32 31, <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv2i16.i16.i64(i32 3, i32 31, <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  sf_vc_xv_se_u16mf2(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv4i16.i16.i32(i32 3, i32 31, <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv4i16.i16.i64(i32 3, i32 31, <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  sf_vc_xv_se_u16m1(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv8i16.i16.i32(i32 3, i32 31, <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv8i16.i16.i64(i32 3, i32 31, <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  sf_vc_xv_se_u16m2(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv16i16.i16.i32(i32 3, i32 31, <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv16i16.i16.i64(i32 3, i32 31, <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  sf_vc_xv_se_u16m4(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv32i16.i16.i32(i32 3, i32 31, <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv32i16.i16.i64(i32 3, i32 31, <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  sf_vc_xv_se_u16m8(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv1i32.i32.i32(i32 3, i32 31, <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv1i32.i32.i64(i32 3, i32 31, <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  sf_vc_xv_se_u32mf2(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv2i32.i32.i32(i32 3, i32 31, <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv2i32.i32.i64(i32 3, i32 31, <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  sf_vc_xv_se_u32m1(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv4i32.i32.i32(i32 3, i32 31, <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv4i32.i32.i64(i32 3, i32 31, <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  sf_vc_xv_se_u32m2(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv8i32.i32.i32(i32 3, i32 31, <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv8i32.i32.i64(i32 3, i32 31, <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  sf_vc_xv_se_u32m4(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xv_se_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv16i32.i32.i32(i32 3, i32 31, <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xv_se_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xv.se.nxv16i32.i32.i64(i32 3, i32 31, <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xv_se_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  sf_vc_xv_se_u32m8(p27_26, p11_7, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv1i8.i8.i32(i32 3, <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv1i8.i8.i64(i32 3, <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vc_v_xv_se_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xv_se_u8mf8(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv2i8.i8.i32(i32 3, <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv2i8.i8.i64(i32 3, <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vc_v_xv_se_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xv_se_u8mf4(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv4i8.i8.i32(i32 3, <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv4i8.i8.i64(i32 3, <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vc_v_xv_se_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xv_se_u8mf2(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv8i8.i8.i32(i32 3, <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv8i8.i8.i64(i32 3, <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vc_v_xv_se_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xv_se_u8m1(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv16i8.i8.i32(i32 3, <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv16i8.i8.i64(i32 3, <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vc_v_xv_se_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xv_se_u8m2(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv32i8.i8.i32(i32 3, <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv32i8.i8.i64(i32 3, <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_sf_vc_v_xv_se_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xv_se_u8m4(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv64i8.i8.i32(i32 3, <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv64i8.i8.i64(i32 3, <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_sf_vc_v_xv_se_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xv_se_u8m8(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv1i16.i16.i32(i32 3, <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv1i16.i16.i64(i32 3, <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_xv_se_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xv_se_u16mf4(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv2i16.i16.i32(i32 3, <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv2i16.i16.i64(i32 3, <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_xv_se_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xv_se_u16mf2(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv4i16.i16.i32(i32 3, <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv4i16.i16.i64(i32 3, <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_xv_se_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xv_se_u16m1(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv8i16.i16.i32(i32 3, <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv8i16.i16.i64(i32 3, <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_xv_se_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xv_se_u16m2(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv16i16.i16.i32(i32 3, <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv16i16.i16.i64(i32 3, <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_xv_se_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xv_se_u16m4(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv32i16.i16.i32(i32 3, <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv32i16.i16.i64(i32 3, <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_xv_se_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xv_se_u16m8(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv1i32.i32.i32(i32 3, <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv1i32.i32.i64(i32 3, <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_xv_se_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xv_se_u32mf2(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv2i32.i32.i32(i32 3, <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv2i32.i32.i64(i32 3, <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_xv_se_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xv_se_u32m1(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i32.i64(i32 3, <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_xv_se_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xv_se_u32m2(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv8i32.i32.i32(i32 3, <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv8i32.i32.i64(i32 3, <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_xv_se_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xv_se_u32m4(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv16i32.i32.i32(i32 3, <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv16i32.i32.i64(i32 3, <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_xv_se_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xv_se_u32m8(p27_26, vs2, rs1, vl);
+}
+
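+// The tests below switch to the plain (non-.se) form of sf.vc.v.xv: the C
+// signatures and operands match the .se group above, but the calls lower to
+// the @llvm.riscv.sf.vc.v.xv.* intrinsics without the .se marker, which
+// presumably denotes the side-effecting variants that must not be elided.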
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xv.nxv1i8.i8.i32(i32 3, <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xv.nxv1i8.i8.i64(i32 3, <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vc_v_xv_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xv_u8mf8(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xv.nxv2i8.i8.i32(i32 3, <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xv.nxv2i8.i8.i64(i32 3, <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vc_v_xv_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xv_u8mf4(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xv.nxv4i8.i8.i32(i32 3, <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xv.nxv4i8.i8.i64(i32 3, <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vc_v_xv_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xv_u8mf2(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xv.nxv8i8.i8.i32(i32 3, <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xv.nxv8i8.i8.i64(i32 3, <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vc_v_xv_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xv_u8m1(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xv.nxv16i8.i8.i32(i32 3, <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xv.nxv16i8.i8.i64(i32 3, <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vc_v_xv_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xv_u8m2(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xv.nxv32i8.i8.i32(i32 3, <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xv.nxv32i8.i8.i64(i32 3, <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_sf_vc_v_xv_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xv_u8m4(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xv.nxv64i8.i8.i32(i32 3, <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xv.nxv64i8.i8.i64(i32 3, <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_sf_vc_v_xv_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xv_u8m8(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xv.nxv1i16.i16.i32(i32 3, <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xv.nxv1i16.i16.i64(i32 3, <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_xv_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xv_u16mf4(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xv.nxv2i16.i16.i32(i32 3, <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xv.nxv2i16.i16.i64(i32 3, <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_xv_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xv_u16mf2(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xv.nxv4i16.i16.i32(i32 3, <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xv.nxv4i16.i16.i64(i32 3, <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_xv_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xv_u16m1(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xv.nxv8i16.i16.i32(i32 3, <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xv.nxv8i16.i16.i64(i32 3, <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_xv_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xv_u16m2(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xv.nxv16i16.i16.i32(i32 3, <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xv.nxv16i16.i16.i64(i32 3, <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_xv_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xv_u16m4(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xv.nxv32i16.i16.i32(i32 3, <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xv.nxv32i16.i16.i64(i32 3, <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_xv_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xv_u16m8(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xv.nxv1i32.i32.i32(i32 3, <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xv.nxv1i32.i32.i64(i32 3, <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_xv_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xv_u32mf2(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xv.nxv2i32.i32.i32(i32 3, <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xv.nxv2i32.i32.i64(i32 3, <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_xv_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xv_u32m1(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.nxv4i32.i32.i64(i32 3, <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_xv_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xv_u32m2(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xv.nxv8i32.i32.i32(i32 3, <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xv.nxv8i32.i32.i64(i32 3, <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_xv_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xv_u32m4(p27_26, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xv.nxv16i32.i32.i32(i32 3, <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xv.nxv16i32.i32.i64(i32 3, <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_xv_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xv_u32m8(p27_26, vs2, rs1, vl);
+}
+
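+// sf.vc.iv.se tests: the scalar rs1 operand of the xv form is replaced by
+// the 5-bit immediate simm5 (folded to i32 10 in the checks, and always
+// typed i32 in the intrinsic name regardless of element width). These forms
+// produce no vector result, so they return void and carry the extra p11_7
+// payload (folded to i32 31) in place of a destination.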
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv1i8.i32.i32(i32 3, i32 31, <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv1i8.i32.i64(i32 3, i32 31, <vscale x 1 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_u8mf8(vuint8mf8_t vs2, size_t vl) {
+  sf_vc_iv_se_u8mf8(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv2i8.i32.i32(i32 3, i32 31, <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv2i8.i32.i64(i32 3, i32 31, <vscale x 2 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_u8mf4(vuint8mf4_t vs2, size_t vl) {
+  sf_vc_iv_se_u8mf4(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv4i8.i32.i32(i32 3, i32 31, <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv4i8.i32.i64(i32 3, i32 31, <vscale x 4 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_u8mf2(vuint8mf2_t vs2, size_t vl) {
+  sf_vc_iv_se_u8mf2(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv8i8.i32.i32(i32 3, i32 31, <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv8i8.i32.i64(i32 3, i32 31, <vscale x 8 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_u8m1(vuint8m1_t vs2, size_t vl) {
+  sf_vc_iv_se_u8m1(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv16i8.i32.i32(i32 3, i32 31, <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv16i8.i32.i64(i32 3, i32 31, <vscale x 16 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_u8m2(vuint8m2_t vs2, size_t vl) {
+  sf_vc_iv_se_u8m2(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv32i8.i32.i32(i32 3, i32 31, <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv32i8.i32.i64(i32 3, i32 31, <vscale x 32 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_u8m4(vuint8m4_t vs2, size_t vl) {
+  sf_vc_iv_se_u8m4(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv64i8.i32.i32(i32 3, i32 31, <vscale x 64 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv64i8.i32.i64(i32 3, i32 31, <vscale x 64 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_u8m8(vuint8m8_t vs2, size_t vl) {
+  sf_vc_iv_se_u8m8(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv1i16.i32.i32(i32 3, i32 31, <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv1i16.i32.i64(i32 3, i32 31, <vscale x 1 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_u16mf4(vuint16mf4_t vs2, size_t vl) {
+  sf_vc_iv_se_u16mf4(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv2i16.i32.i32(i32 3, i32 31, <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv2i16.i32.i64(i32 3, i32 31, <vscale x 2 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_u16mf2(vuint16mf2_t vs2, size_t vl) {
+  sf_vc_iv_se_u16mf2(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv4i16.i32.i32(i32 3, i32 31, <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv4i16.i32.i64(i32 3, i32 31, <vscale x 4 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_u16m1(vuint16m1_t vs2, size_t vl) {
+  sf_vc_iv_se_u16m1(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv8i16.i32.i32(i32 3, i32 31, <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv8i16.i32.i64(i32 3, i32 31, <vscale x 8 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_u16m2(vuint16m2_t vs2, size_t vl) {
+  sf_vc_iv_se_u16m2(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv16i16.i32.i32(i32 3, i32 31, <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv16i16.i32.i64(i32 3, i32 31, <vscale x 16 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_u16m4(vuint16m4_t vs2, size_t vl) {
+  sf_vc_iv_se_u16m4(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv32i16.i32.i32(i32 3, i32 31, <vscale x 32 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv32i16.i32.i64(i32 3, i32 31, <vscale x 32 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_u16m8(vuint16m8_t vs2, size_t vl) {
+  sf_vc_iv_se_u16m8(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv1i32.i32.i32(i32 3, i32 31, <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv1i32.i32.i64(i32 3, i32 31, <vscale x 1 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_u32mf2(vuint32mf2_t vs2, size_t vl) {
+  sf_vc_iv_se_u32mf2(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv2i32.i32.i32(i32 3, i32 31, <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv2i32.i32.i64(i32 3, i32 31, <vscale x 2 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_u32m1(vuint32m1_t vs2, size_t vl) {
+  sf_vc_iv_se_u32m1(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv4i32.i32.i32(i32 3, i32 31, <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv4i32.i32.i64(i32 3, i32 31, <vscale x 4 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_u32m2(vuint32m2_t vs2, size_t vl) {
+  sf_vc_iv_se_u32m2(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv8i32.i32.i32(i32 3, i32 31, <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv8i32.i32.i64(i32 3, i32 31, <vscale x 8 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_u32m4(vuint32m4_t vs2, size_t vl) {
+  sf_vc_iv_se_u32m4(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv16i32.i32.i32(i32 3, i32 31, <vscale x 16 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv16i32.i32.i64(i32 3, i32 31, <vscale x 16 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_u32m8(vuint32m8_t vs2, size_t vl) {
+  sf_vc_iv_se_u32m8(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv1i64.i32.i32(i32 3, i32 31, <vscale x 1 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv1i64.i32.i64(i32 3, i32 31, <vscale x 1 x i64> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_u64m1(vuint64m1_t vs2, size_t vl) {
+  sf_vc_iv_se_u64m1(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv2i64.i32.i32(i32 3, i32 31, <vscale x 2 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv2i64.i32.i64(i32 3, i32 31, <vscale x 2 x i64> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_u64m2(vuint64m2_t vs2, size_t vl) {
+  sf_vc_iv_se_u64m2(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv4i64.i32.i32(i32 3, i32 31, <vscale x 4 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv4i64.i32.i64(i32 3, i32 31, <vscale x 4 x i64> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_u64m4(vuint64m4_t vs2, size_t vl) {
+  sf_vc_iv_se_u64m4(p27_26, p11_7, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_iv_se_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv8i64.i32.i32(i32 3, i32 31, <vscale x 8 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_iv_se_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.iv.se.nxv8i64.i32.i64(i32 3, i32 31, <vscale x 8 x i64> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_iv_se_u64m8(vuint64m8_t vs2, size_t vl) {
+  sf_vc_iv_se_u64m8(p27_26, p11_7, vs2, simm5, vl);
+}
+
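+// sf.vc.v.iv.se tests: the vector-result counterpart of the group above.
+// The p11_7 payload disappears from the call, presumably because the
+// destination register now occupies that encoding slot, leaving only
+// p27_26, vs2, simm5, and vl.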
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv1i8.i32.i32(i32 3, <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv1i8.i32.i64(i32 3, <vscale x 1 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vc_v_iv_se_u8mf8(vuint8mf8_t vs2, size_t vl) {
+  return sf_vc_v_iv_se_u8mf8(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv2i8.i32.i32(i32 3, <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv2i8.i32.i64(i32 3, <vscale x 2 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vc_v_iv_se_u8mf4(vuint8mf4_t vs2, size_t vl) {
+  return sf_vc_v_iv_se_u8mf4(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv4i8.i32.i32(i32 3, <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv4i8.i32.i64(i32 3, <vscale x 4 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vc_v_iv_se_u8mf2(vuint8mf2_t vs2, size_t vl) {
+  return sf_vc_v_iv_se_u8mf2(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv8i8.i32.i32(i32 3, <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv8i8.i32.i64(i32 3, <vscale x 8 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vc_v_iv_se_u8m1(vuint8m1_t vs2, size_t vl) {
+  return sf_vc_v_iv_se_u8m1(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv16i8.i32.i32(i32 3, <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv16i8.i32.i64(i32 3, <vscale x 16 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vc_v_iv_se_u8m2(vuint8m2_t vs2, size_t vl) {
+  return sf_vc_v_iv_se_u8m2(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv32i8.i32.i32(i32 3, <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv32i8.i32.i64(i32 3, <vscale x 32 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_sf_vc_v_iv_se_u8m4(vuint8m4_t vs2, size_t vl) {
+  return sf_vc_v_iv_se_u8m4(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv64i8.i32.i32(i32 3, <vscale x 64 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv64i8.i32.i64(i32 3, <vscale x 64 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_sf_vc_v_iv_se_u8m8(vuint8m8_t vs2, size_t vl) {
+  return sf_vc_v_iv_se_u8m8(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv1i16.i32.i32(i32 3, <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv1i16.i32.i64(i32 3, <vscale x 1 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_iv_se_u16mf4(vuint16mf4_t vs2, size_t vl) {
+  return sf_vc_v_iv_se_u16mf4(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv2i16.i32.i32(i32 3, <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv2i16.i32.i64(i32 3, <vscale x 2 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_iv_se_u16mf2(vuint16mf2_t vs2, size_t vl) {
+  return sf_vc_v_iv_se_u16mf2(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv4i16.i32.i32(i32 3, <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv4i16.i32.i64(i32 3, <vscale x 4 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_iv_se_u16m1(vuint16m1_t vs2, size_t vl) {
+  return sf_vc_v_iv_se_u16m1(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv8i16.i32.i32(i32 3, <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv8i16.i32.i64(i32 3, <vscale x 8 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_iv_se_u16m2(vuint16m2_t vs2, size_t vl) {
+  return sf_vc_v_iv_se_u16m2(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv16i16.i32.i32(i32 3, <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv16i16.i32.i64(i32 3, <vscale x 16 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_iv_se_u16m4(vuint16m4_t vs2, size_t vl) {
+  return sf_vc_v_iv_se_u16m4(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv32i16.i32.i32(i32 3, <vscale x 32 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv32i16.i32.i64(i32 3, <vscale x 32 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_iv_se_u16m8(vuint16m8_t vs2, size_t vl) {
+  return sf_vc_v_iv_se_u16m8(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv1i32.i32.i32(i32 3, <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv1i32.i32.i64(i32 3, <vscale x 1 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_iv_se_u32mf2(vuint32mf2_t vs2, size_t vl) {
+  return sf_vc_v_iv_se_u32mf2(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv2i32.i32.i32(i32 3, <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv2i32.i32.i64(i32 3, <vscale x 2 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_iv_se_u32m1(vuint32m1_t vs2, size_t vl) {
+  return sf_vc_v_iv_se_u32m1(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv4i32.i32.i64(i32 3, <vscale x 4 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_iv_se_u32m2(vuint32m2_t vs2, size_t vl) {
+  return sf_vc_v_iv_se_u32m2(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv8i32.i32.i32(i32 3, <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv8i32.i32.i64(i32 3, <vscale x 8 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_iv_se_u32m4(vuint32m4_t vs2, size_t vl) {
+  return sf_vc_v_iv_se_u32m4(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv16i32.i32.i32(i32 3, <vscale x 16 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv16i32.i32.i64(i32 3, <vscale x 16 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_iv_se_u32m8(vuint32m8_t vs2, size_t vl) {
+  return sf_vc_v_iv_se_u32m8(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv1i64.i32.i32(i32 3, <vscale x 1 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv1i64.i32.i64(i32 3, <vscale x 1 x i64> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_iv_se_u64m1(vuint64m1_t vs2, size_t vl) {
+  return sf_vc_v_iv_se_u64m1(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv2i64.i32.i32(i32 3, <vscale x 2 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv2i64.i32.i64(i32 3, <vscale x 2 x i64> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_iv_se_u64m2(vuint64m2_t vs2, size_t vl) {
+  return sf_vc_v_iv_se_u64m2(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv4i64.i32.i32(i32 3, <vscale x 4 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv4i64.i32.i64(i32 3, <vscale x 4 x i64> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_iv_se_u64m4(vuint64m4_t vs2, size_t vl) {
+  return sf_vc_v_iv_se_u64m4(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv8i64.i32.i32(i32 3, <vscale x 8 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv8i64.i32.i64(i32 3, <vscale x 8 x i64> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_iv_se_u64m8(vuint64m8_t vs2, size_t vl) {
+  return sf_vc_v_iv_se_u64m8(p27_26, vs2, simm5, vl);
+}
+
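+// sf.vc.v.iv tests: identical shape to the .se group above, but lowering to
+// the plain @llvm.riscv.sf.vc.v.iv.* intrinsics.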
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.iv.nxv1i8.i32.i32(i32 3, <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.iv.nxv1i8.i32.i64(i32 3, <vscale x 1 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vc_v_iv_u8mf8(vuint8mf8_t vs2, size_t vl) {
+  return sf_vc_v_iv_u8mf8(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.iv.nxv2i8.i32.i32(i32 3, <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.iv.nxv2i8.i32.i64(i32 3, <vscale x 2 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vc_v_iv_u8mf4(vuint8mf4_t vs2, size_t vl) {
+  return sf_vc_v_iv_u8mf4(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.iv.nxv4i8.i32.i32(i32 3, <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.iv.nxv4i8.i32.i64(i32 3, <vscale x 4 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vc_v_iv_u8mf2(vuint8mf2_t vs2, size_t vl) {
+  return sf_vc_v_iv_u8mf2(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.iv.nxv8i8.i32.i32(i32 3, <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.iv.nxv8i8.i32.i64(i32 3, <vscale x 8 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vc_v_iv_u8m1(vuint8m1_t vs2, size_t vl) {
+  return sf_vc_v_iv_u8m1(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.iv.nxv16i8.i32.i32(i32 3, <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.iv.nxv16i8.i32.i64(i32 3, <vscale x 16 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vc_v_iv_u8m2(vuint8m2_t vs2, size_t vl) {
+  return sf_vc_v_iv_u8m2(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.iv.nxv32i8.i32.i32(i32 3, <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.iv.nxv32i8.i32.i64(i32 3, <vscale x 32 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_sf_vc_v_iv_u8m4(vuint8m4_t vs2, size_t vl) {
+  return sf_vc_v_iv_u8m4(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.iv.nxv64i8.i32.i32(i32 3, <vscale x 64 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.iv.nxv64i8.i32.i64(i32 3, <vscale x 64 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_sf_vc_v_iv_u8m8(vuint8m8_t vs2, size_t vl) {
+  return sf_vc_v_iv_u8m8(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.iv.nxv1i16.i32.i32(i32 3, <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.iv.nxv1i16.i32.i64(i32 3, <vscale x 1 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_iv_u16mf4(vuint16mf4_t vs2, size_t vl) {
+  return sf_vc_v_iv_u16mf4(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.iv.nxv2i16.i32.i32(i32 3, <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.iv.nxv2i16.i32.i64(i32 3, <vscale x 2 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_iv_u16mf2(vuint16mf2_t vs2, size_t vl) {
+  return sf_vc_v_iv_u16mf2(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.iv.nxv4i16.i32.i32(i32 3, <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.iv.nxv4i16.i32.i64(i32 3, <vscale x 4 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_iv_u16m1(vuint16m1_t vs2, size_t vl) {
+  return sf_vc_v_iv_u16m1(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.iv.nxv8i16.i32.i32(i32 3, <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.iv.nxv8i16.i32.i64(i32 3, <vscale x 8 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_iv_u16m2(vuint16m2_t vs2, size_t vl) {
+  return sf_vc_v_iv_u16m2(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.iv.nxv16i16.i32.i32(i32 3, <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.iv.nxv16i16.i32.i64(i32 3, <vscale x 16 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_iv_u16m4(vuint16m4_t vs2, size_t vl) {
+  return sf_vc_v_iv_u16m4(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.iv.nxv32i16.i32.i32(i32 3, <vscale x 32 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.iv.nxv32i16.i32.i64(i32 3, <vscale x 32 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_iv_u16m8(vuint16m8_t vs2, size_t vl) {
+  return sf_vc_v_iv_u16m8(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.iv.nxv1i32.i32.i32(i32 3, <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.iv.nxv1i32.i32.i64(i32 3, <vscale x 1 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_iv_u32mf2(vuint32mf2_t vs2, size_t vl) {
+  return sf_vc_v_iv_u32mf2(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.iv.nxv2i32.i32.i32(i32 3, <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.iv.nxv2i32.i32.i64(i32 3, <vscale x 2 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_iv_u32m1(vuint32m1_t vs2, size_t vl) {
+  return sf_vc_v_iv_u32m1(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.nxv4i32.i32.i64(i32 3, <vscale x 4 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_iv_u32m2(vuint32m2_t vs2, size_t vl) {
+  return sf_vc_v_iv_u32m2(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.iv.nxv8i32.i32.i32(i32 3, <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.iv.nxv8i32.i32.i64(i32 3, <vscale x 8 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_iv_u32m4(vuint32m4_t vs2, size_t vl) {
+  return sf_vc_v_iv_u32m4(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.iv.nxv16i32.i32.i32(i32 3, <vscale x 16 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.iv.nxv16i32.i32.i64(i32 3, <vscale x 16 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_iv_u32m8(vuint32m8_t vs2, size_t vl) {
+  return sf_vc_v_iv_u32m8(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.iv.nxv1i64.i32.i32(i32 3, <vscale x 1 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.iv.nxv1i64.i32.i64(i32 3, <vscale x 1 x i64> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_iv_u64m1(vuint64m1_t vs2, size_t vl) {
+  return sf_vc_v_iv_u64m1(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.iv.nxv2i64.i32.i32(i32 3, <vscale x 2 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.iv.nxv2i64.i32.i64(i32 3, <vscale x 2 x i64> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_iv_u64m2(vuint64m2_t vs2, size_t vl) {
+  return sf_vc_v_iv_u64m2(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.iv.nxv4i64.i32.i32(i32 3, <vscale x 4 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.iv.nxv4i64.i32.i64(i32 3, <vscale x 4 x i64> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_iv_u64m4(vuint64m4_t vs2, size_t vl) {
+  return sf_vc_v_iv_u64m4(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_iv_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.iv.nxv8i64.i32.i32(i32 3, <vscale x 8 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_iv_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.iv.nxv8i64.i32.i64(i32 3, <vscale x 8 x i64> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_iv_u64m8(vuint64m8_t vs2, size_t vl) {
+  return sf_vc_v_iv_u64m8(p27_26, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv1i16.f16.i32(i32 1, i32 31, <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv1i16.f16.i64(i32 1, i32 31, <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_u16mf4(vuint16mf4_t vs2, _Float16 fs1, size_t vl) {
+  sf_vc_fv_se_u16mf4(p27, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv2i16.f16.i32(i32 1, i32 31, <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv2i16.f16.i64(i32 1, i32 31, <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_u16mf2(vuint16mf2_t vs2, _Float16 fs1, size_t vl) {
+  sf_vc_fv_se_u16mf2(p27, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv4i16.f16.i32(i32 1, i32 31, <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv4i16.f16.i64(i32 1, i32 31, <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_u16m1(vuint16m1_t vs2, _Float16 fs1, size_t vl) {
+  sf_vc_fv_se_u16m1(p27, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv8i16.f16.i32(i32 1, i32 31, <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv8i16.f16.i64(i32 1, i32 31, <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_u16m2(vuint16m2_t vs2, _Float16 fs1, size_t vl) {
+  sf_vc_fv_se_u16m2(p27, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv16i16.f16.i32(i32 1, i32 31, <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv16i16.f16.i64(i32 1, i32 31, <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_u16m4(vuint16m4_t vs2, _Float16 fs1, size_t vl) {
+  sf_vc_fv_se_u16m4(p27, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv32i16.f16.i32(i32 1, i32 31, <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv32i16.f16.i64(i32 1, i32 31, <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_u16m8(vuint16m8_t vs2, _Float16 fs1, size_t vl) {
+  sf_vc_fv_se_u16m8(p27, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv1i32.f32.i32(i32 1, i32 31, <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv1i32.f32.i64(i32 1, i32 31, <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_u32mf2(vuint32mf2_t vs2, float fs1, size_t vl) {
+  sf_vc_fv_se_u32mf2(p27, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv2i32.f32.i32(i32 1, i32 31, <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv2i32.f32.i64(i32 1, i32 31, <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_u32m1(vuint32m1_t vs2, float fs1, size_t vl) {
+  sf_vc_fv_se_u32m1(p27, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv4i32.f32.i32(i32 1, i32 31, <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv4i32.f32.i64(i32 1, i32 31, <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_u32m2(vuint32m2_t vs2, float fs1, size_t vl) {
+  sf_vc_fv_se_u32m2(p27, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv8i32.f32.i32(i32 1, i32 31, <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv8i32.f32.i64(i32 1, i32 31, <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_u32m4(vuint32m4_t vs2, float fs1, size_t vl) {
+  sf_vc_fv_se_u32m4(p27, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv16i32.f32.i32(i32 1, i32 31, <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv16i32.f32.i64(i32 1, i32 31, <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_u32m8(vuint32m8_t vs2, float fs1, size_t vl) {
+  sf_vc_fv_se_u32m8(p27, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv1i64.f64.i32(i32 1, i32 31, <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv1i64.f64.i64(i32 1, i32 31, <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_u64m1(vuint64m1_t vs2, double fs1, size_t vl) {
+  sf_vc_fv_se_u64m1(p27, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv2i64.f64.i32(i32 1, i32 31, <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv2i64.f64.i64(i32 1, i32 31, <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_u64m2(vuint64m2_t vs2, double fs1, size_t vl) {
+  sf_vc_fv_se_u64m2(p27, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv4i64.f64.i32(i32 1, i32 31, <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv4i64.f64.i64(i32 1, i32 31, <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_u64m4(vuint64m4_t vs2, double fs1, size_t vl) {
+  sf_vc_fv_se_u64m4(p27, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fv_se_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv8i64.f64.i32(i32 1, i32 31, <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fv_se_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fv.se.nxv8i64.f64.i64(i32 1, i32 31, <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fv_se_u64m8(vuint64m8_t vs2, double fs1, size_t vl) {
+  sf_vc_fv_se_u64m8(p27, p11_7, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv1i16.f16.i32(i32 1, <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv1i16.f16.i64(i32 1, <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_fv_se_u16mf4(vuint16mf4_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fv_se_u16mf4(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv2i16.f16.i32(i32 1, <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv2i16.f16.i64(i32 1, <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_fv_se_u16mf2(vuint16mf2_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fv_se_u16mf2(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv4i16.f16.i32(i32 1, <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv4i16.f16.i64(i32 1, <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_fv_se_u16m1(vuint16m1_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fv_se_u16m1(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv8i16.f16.i32(i32 1, <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv8i16.f16.i64(i32 1, <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_fv_se_u16m2(vuint16m2_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fv_se_u16m2(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv16i16.f16.i32(i32 1, <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv16i16.f16.i64(i32 1, <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_fv_se_u16m4(vuint16m4_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fv_se_u16m4(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv32i16.f16.i32(i32 1, <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv32i16.f16.i64(i32 1, <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_fv_se_u16m8(vuint16m8_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fv_se_u16m8(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv1i32.f32.i32(i32 1, <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv1i32.f32.i64(i32 1, <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_fv_se_u32mf2(vuint32mf2_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fv_se_u32mf2(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv2i32.f32.i32(i32 1, <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv2i32.f32.i64(i32 1, <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_fv_se_u32m1(vuint32m1_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fv_se_u32m1(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv4i32.f32.i32(i32 1, <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv4i32.f32.i64(i32 1, <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_fv_se_u32m2(vuint32m2_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fv_se_u32m2(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv8i32.f32.i32(i32 1, <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv8i32.f32.i64(i32 1, <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_fv_se_u32m4(vuint32m4_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fv_se_u32m4(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv16i32.f32.i32(i32 1, <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv16i32.f32.i64(i32 1, <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_fv_se_u32m8(vuint32m8_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fv_se_u32m8(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv1i64.f64.i32(i32 1, <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv1i64.f64.i64(i32 1, <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_fv_se_u64m1(vuint64m1_t vs2, double fs1, size_t vl) {
+  return sf_vc_v_fv_se_u64m1(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv2i64.f64.i32(i32 1, <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv2i64.f64.i64(i32 1, <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_fv_se_u64m2(vuint64m2_t vs2, double fs1, size_t vl) {
+  return sf_vc_v_fv_se_u64m2(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv4i64.f64.i32(i32 1, <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv4i64.f64.i64(i32 1, <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_fv_se_u64m4(vuint64m4_t vs2, double fs1, size_t vl) {
+  return sf_vc_v_fv_se_u64m4(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv8i64.f64.i32(i32 1, <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv8i64.f64.i64(i32 1, <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_fv_se_u64m8(vuint64m8_t vs2, double fs1, size_t vl) {
+  return sf_vc_v_fv_se_u64m8(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fv.nxv1i16.f16.i32(i32 1, <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fv.nxv1i16.f16.i64(i32 1, <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_fv_u16mf4(vuint16mf4_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fv_u16mf4(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fv.nxv2i16.f16.i32(i32 1, <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fv.nxv2i16.f16.i64(i32 1, <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_fv_u16mf2(vuint16mf2_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fv_u16mf2(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fv.nxv4i16.f16.i32(i32 1, <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fv.nxv4i16.f16.i64(i32 1, <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_fv_u16m1(vuint16m1_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fv_u16m1(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fv.nxv8i16.f16.i32(i32 1, <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fv.nxv8i16.f16.i64(i32 1, <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_fv_u16m2(vuint16m2_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fv_u16m2(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fv.nxv16i16.f16.i32(i32 1, <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fv.nxv16i16.f16.i64(i32 1, <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_fv_u16m4(vuint16m4_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fv_u16m4(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fv.nxv32i16.f16.i32(i32 1, <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fv.nxv32i16.f16.i64(i32 1, <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_fv_u16m8(vuint16m8_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fv_u16m8(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fv.nxv1i32.f32.i32(i32 1, <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fv.nxv1i32.f32.i64(i32 1, <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_fv_u32mf2(vuint32mf2_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fv_u32mf2(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fv.nxv2i32.f32.i32(i32 1, <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fv.nxv2i32.f32.i64(i32 1, <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_fv_u32m1(vuint32m1_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fv_u32m1(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.nxv4i32.f32.i32(i32 1, <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.nxv4i32.f32.i64(i32 1, <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_fv_u32m2(vuint32m2_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fv_u32m2(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fv.nxv8i32.f32.i32(i32 1, <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fv.nxv8i32.f32.i64(i32 1, <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_fv_u32m4(vuint32m4_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fv_u32m4(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fv.nxv16i32.f32.i32(i32 1, <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fv.nxv16i32.f32.i64(i32 1, <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_fv_u32m8(vuint32m8_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fv_u32m8(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fv.nxv1i64.f64.i32(i32 1, <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fv.nxv1i64.f64.i64(i32 1, <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_fv_u64m1(vuint64m1_t vs2, double fs1, size_t vl) {
+  return sf_vc_v_fv_u64m1(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fv.nxv2i64.f64.i32(i32 1, <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fv.nxv2i64.f64.i64(i32 1, <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_fv_u64m2(vuint64m2_t vs2, double fs1, size_t vl) {
+  return sf_vc_v_fv_u64m2(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fv.nxv4i64.f64.i32(i32 1, <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fv.nxv4i64.f64.i64(i32 1, <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_fv_u64m4(vuint64m4_t vs2, double fs1, size_t vl) {
+  return sf_vc_v_fv_u64m4(p27, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fv_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.nxv8i64.f64.i32(i32 1, <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fv_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.nxv8i64.f64.i64(i32 1, <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_fv_u64m8(vuint64m8_t vs2, double fs1, size_t vl) {
+  return sf_vc_v_fv_u64m8(p27, vs2, fs1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-xvv-rv64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-xvv-rv64.c
new file mode 100644
index 0000000000000..fee251b1b6fd8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-xvv-rv64.c
@@ -0,0 +1,115 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+const int p27_26 = 3;
+
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv1i64.i64.i64(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  sf_vc_xvv_se_u64m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv2i64.i64.i64(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  sf_vc_xvv_se_u64m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv4i64.i64.i64(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  sf_vc_xvv_se_u64m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv8i64.i64.i64(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  sf_vc_xvv_se_u64m8(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvv.se.nxv1i64.i64.i64(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_xvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return sf_vc_v_xvv_se_u64m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvv.se.nxv2i64.i64.i64(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_xvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return sf_vc_v_xvv_se_u64m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvv.se.nxv4i64.i64.i64(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_xvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return sf_vc_v_xvv_se_u64m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvv.se.nxv8i64.i64.i64(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_xvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return sf_vc_v_xvv_se_u64m8(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvv.nxv1i64.i64.i64(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_xvv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return sf_vc_v_xvv_u64m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvv.nxv2i64.i64.i64(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_xvv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return sf_vc_v_xvv_u64m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvv.nxv4i64.i64.i64(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_xvv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return sf_vc_v_xvv_u64m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvv.nxv8i64.i64.i64(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_xvv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return sf_vc_v_xvv_u64m8(p27_26, vd, vs2, rs1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-xvv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-xvv.c
new file mode 100644
index 0000000000000..d5d95693115e6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-xvv.c
@@ -0,0 +1,3243 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv32 -target-feature +v -target-feature +zfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+const int p27_26 = 3;
+const int p27 = 1;
+const int simm5 = 10;
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv1i8.nxv1i8.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv1i8.nxv1i8.i64(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  sf_vc_vvv_se_u8mf8(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv2i8.nxv2i8.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv2i8.nxv2i8.i64(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  sf_vc_vvv_se_u8mf4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv4i8.nxv4i8.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv4i8.nxv4i8.i64(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  sf_vc_vvv_se_u8mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv8i8.nxv8i8.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv8i8.nxv8i8.i64(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  sf_vc_vvv_se_u8m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv16i8.nxv16i8.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv16i8.nxv16i8.i64(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  sf_vc_vvv_se_u8m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv32i8.nxv32i8.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv32i8.nxv32i8.i64(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  sf_vc_vvv_se_u8m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv64i8.nxv64i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv64i8.nxv64i8.i64(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  sf_vc_vvv_se_u8m8(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv1i16.nxv1i16.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv1i16.nxv1i16.i64(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  sf_vc_vvv_se_u16mf4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv2i16.nxv2i16.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv2i16.nxv2i16.i64(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  sf_vc_vvv_se_u16mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv4i16.nxv4i16.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv4i16.nxv4i16.i64(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  sf_vc_vvv_se_u16m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv8i16.nxv8i16.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv8i16.nxv8i16.i64(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  sf_vc_vvv_se_u16m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv16i16.nxv16i16.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv16i16.nxv16i16.i64(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  sf_vc_vvv_se_u16m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv32i16.nxv32i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv32i16.nxv32i16.i64(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  sf_vc_vvv_se_u16m8(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv1i32.nxv1i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv1i32.nxv1i32.i64(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  sf_vc_vvv_se_u32mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv2i32.nxv2i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv2i32.nxv2i32.i64(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  sf_vc_vvv_se_u32m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv4i32.nxv4i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv4i32.nxv4i32.i64(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  sf_vc_vvv_se_u32m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv8i32.nxv8i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv8i32.nxv8i32.i64(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  sf_vc_vvv_se_u32m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv16i32.nxv16i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv16i32.nxv16i32.i64(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  sf_vc_vvv_se_u32m8(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv1i64.nxv1i64.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv1i64.nxv1i64.i64(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  sf_vc_vvv_se_u64m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv2i64.nxv2i64.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv2i64.nxv2i64.i64(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  sf_vc_vvv_se_u64m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv4i64.nxv4i64.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv4i64.nxv4i64.i64(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  sf_vc_vvv_se_u64m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv8i64.nxv8i64.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.nxv8i64.nxv8i64.i64(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  sf_vc_vvv_se_u64m8(p27_26, vd, vs2, vs1, vl);
+}
+
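+// Note: the tests below switch from the void sf_vc_vvv_se_* builtins to the
+// value-returning sf_vc_v_vvv_se_* forms. They take the same three vector
+// operands and the same p27_26 payload (lowered to the i32 3 immediate in the
+// CHECK lines), but the result of llvm.riscv.sf.vc.v.vvv.se.* is returned to
+// the caller instead of being discarded.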
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.nxv1i8.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.nxv1i8.i64(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vc_v_vvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return sf_vc_v_vvv_se_u8mf8(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.nxv2i8.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.nxv2i8.i64(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vc_v_vvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return sf_vc_v_vvv_se_u8mf4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.nxv4i8.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.nxv4i8.i64(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vc_v_vvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return sf_vc_v_vvv_se_u8mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.nxv8i8.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.nxv8i8.i64(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vc_v_vvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return sf_vc_v_vvv_se_u8m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.nxv16i8.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.nxv16i8.i64(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vc_v_vvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return sf_vc_v_vvv_se_u8m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.nxv32i8.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.nxv32i8.i64(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_sf_vc_v_vvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return sf_vc_v_vvv_se_u8m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.nxv64i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.nxv64i8.i64(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_sf_vc_v_vvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return sf_vc_v_vvv_se_u8m8(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.nxv1i16.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.nxv1i16.i64(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_vvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return sf_vc_v_vvv_se_u16mf4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.nxv2i16.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.nxv2i16.i64(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_vvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return sf_vc_v_vvv_se_u16mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.nxv4i16.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.nxv4i16.i64(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_vvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return sf_vc_v_vvv_se_u16m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.nxv8i16.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.nxv8i16.i64(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_vvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return sf_vc_v_vvv_se_u16m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.nxv16i16.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.nxv16i16.i64(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_vvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return sf_vc_v_vvv_se_u16m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.nxv32i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.nxv32i16.i64(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_vvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return sf_vc_v_vvv_se_u16m8(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.nxv1i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.nxv1i32.i64(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_vvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return sf_vc_v_vvv_se_u32mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.nxv2i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.nxv2i32.i64(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_vvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return sf_vc_v_vvv_se_u32m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.nxv4i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.nxv4i32.i64(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_vvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return sf_vc_v_vvv_se_u32m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.nxv8i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.nxv8i32.i64(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_vvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return sf_vc_v_vvv_se_u32m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.nxv16i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.nxv16i32.i64(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_vvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return sf_vc_v_vvv_se_u32m8(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.nxv1i64.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.nxv1i64.i64(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_vvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return sf_vc_v_vvv_se_u64m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.nxv2i64.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.nxv2i64.i64(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_vvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return sf_vc_v_vvv_se_u64m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.nxv4i64.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.nxv4i64.i64(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_vvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return sf_vc_v_vvv_se_u64m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.nxv8i64.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.nxv8i64.i64(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_vvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return sf_vc_v_vvv_se_u64m8(p27_26, vd, vs2, vs1, vl);
+}
+
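+// Note: the sf_vc_v_vvv_* tests that follow drop the ".se" infix and lower to
+// the llvm.riscv.sf.vc.v.vvv.* intrinsics, which (per the usual VCIX naming,
+// where "se" marks the side-effect variants) are not modeled as having side
+// effects, so only the returned vector keeps the call alive.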
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.nxv1i8.nxv1i8.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.nxv1i8.nxv1i8.i64(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vc_v_vvv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return sf_vc_v_vvv_u8mf8(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.nxv2i8.nxv2i8.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.nxv2i8.nxv2i8.i64(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vc_v_vvv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return sf_vc_v_vvv_u8mf4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.nxv4i8.nxv4i8.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.nxv4i8.nxv4i8.i64(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vc_v_vvv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return sf_vc_v_vvv_u8mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.nxv8i8.nxv8i8.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.nxv8i8.nxv8i8.i64(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vc_v_vvv_u8m1(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return sf_vc_v_vvv_u8m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.nxv16i8.nxv16i8.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.nxv16i8.nxv16i8.i64(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vc_v_vvv_u8m2(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return sf_vc_v_vvv_u8m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.nxv32i8.nxv32i8.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.nxv32i8.nxv32i8.i64(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_sf_vc_v_vvv_u8m4(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return sf_vc_v_vvv_u8m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.nxv64i8.nxv64i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.nxv64i8.nxv64i8.i64(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_sf_vc_v_vvv_u8m8(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return sf_vc_v_vvv_u8m8(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.nxv1i16.nxv1i16.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.nxv1i16.nxv1i16.i64(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_vvv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return sf_vc_v_vvv_u16mf4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.nxv2i16.nxv2i16.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.nxv2i16.nxv2i16.i64(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_vvv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return sf_vc_v_vvv_u16mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.nxv4i16.nxv4i16.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.nxv4i16.nxv4i16.i64(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_vvv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return sf_vc_v_vvv_u16m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.nxv8i16.nxv8i16.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.nxv8i16.nxv8i16.i64(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_vvv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return sf_vc_v_vvv_u16m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.nxv16i16.nxv16i16.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.nxv16i16.nxv16i16.i64(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_vvv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return sf_vc_v_vvv_u16m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.nxv32i16.nxv32i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.nxv32i16.nxv32i16.i64(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_vvv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return sf_vc_v_vvv_u16m8(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.nxv1i32.nxv1i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.nxv1i32.nxv1i32.i64(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_vvv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return sf_vc_v_vvv_u32mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.nxv2i32.nxv2i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.nxv2i32.nxv2i32.i64(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_vvv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return sf_vc_v_vvv_u32m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.nxv4i32.nxv4i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.nxv4i32.nxv4i32.i64(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_vvv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return sf_vc_v_vvv_u32m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.nxv8i32.nxv8i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.nxv8i32.nxv8i32.i64(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_vvv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return sf_vc_v_vvv_u32m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.nxv16i32.nxv16i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.nxv16i32.nxv16i32.i64(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_vvv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return sf_vc_v_vvv_u32m8(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.nxv1i64.nxv1i64.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.nxv1i64.nxv1i64.i64(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_vvv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return sf_vc_v_vvv_u64m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.nxv2i64.nxv2i64.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.nxv2i64.nxv2i64.i64(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_vvv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return sf_vc_v_vvv_u64m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.nxv4i64.nxv4i64.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.nxv4i64.nxv4i64.i64(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_vvv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return sf_vc_v_vvv_u64m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.nxv8i64.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.nxv8i64.i64(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_vvv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return sf_vc_v_vvv_u64m8(p27_26, vd, vs2, vs1, vl);
+}
+
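+// Note: from here on the tests cover the sf_vc_xvv_se_* builtins, where the
+// third source operand is a scalar GPR (uint8_t/uint16_t/uint32_t rs1) rather
+// than a vector vs1; the scalar type is encoded in the IR intrinsic name,
+// e.g. llvm.riscv.sf.vc.xvv.se.nxv1i8.i8.i32.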
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv1i8.i8.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv1i8.i8.i64(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  sf_vc_xvv_se_u8mf8(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv2i8.i8.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv2i8.i8.i64(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  sf_vc_xvv_se_u8mf4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv4i8.i8.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv4i8.i8.i64(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  sf_vc_xvv_se_u8mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv8i8.i8.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv8i8.i8.i64(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  sf_vc_xvv_se_u8m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv16i8.i8.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv16i8.i8.i64(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  sf_vc_xvv_se_u8m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv32i8.i8.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv32i8.i8.i64(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  sf_vc_xvv_se_u8m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv64i8.i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv64i8.i8.i64(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  sf_vc_xvv_se_u8m8(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv1i16.i16.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv1i16.i16.i64(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  sf_vc_xvv_se_u16mf4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv2i16.i16.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv2i16.i16.i64(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  sf_vc_xvv_se_u16mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv4i16.i16.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv4i16.i16.i64(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  sf_vc_xvv_se_u16m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv8i16.i16.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv8i16.i16.i64(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  sf_vc_xvv_se_u16m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv16i16.i16.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv16i16.i16.i64(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  sf_vc_xvv_se_u16m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv32i16.i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv32i16.i16.i64(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  sf_vc_xvv_se_u16m8(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv1i32.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv1i32.i32.i64(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  sf_vc_xvv_se_u32mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv2i32.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv2i32.i32.i64(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  sf_vc_xvv_se_u32m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv4i32.i32.i64(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  sf_vc_xvv_se_u32m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv8i32.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv8i32.i32.i64(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  sf_vc_xvv_se_u32m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv16i32.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.nxv16i32.i32.i64(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  sf_vc_xvv_se_u32m8(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.i8.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.i8.i64(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vc_v_xvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvv_se_u8mf8(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.i8.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.i8.i64(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vc_v_xvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvv_se_u8mf4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.i8.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.i8.i64(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vc_v_xvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvv_se_u8mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.i8.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.i8.i64(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vc_v_xvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvv_se_u8m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.i8.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.i8.i64(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vc_v_xvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvv_se_u8m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.i8.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.i8.i64(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_sf_vc_v_xvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvv_se_u8m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.i8.i64(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_sf_vc_v_xvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvv_se_u8m8(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.i16.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.i16.i64(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_xvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xvv_se_u16mf4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.i16.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.i16.i64(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_xvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xvv_se_u16mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.i16.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.i16.i64(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_xvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xvv_se_u16m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.i16.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.i16.i64(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_xvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xvv_se_u16m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.i16.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.i16.i64(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_xvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xvv_se_u16m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.i16.i64(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_xvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xvv_se_u16m8(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.i32.i64(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_xvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xvv_se_u32mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.i32.i64(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_xvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xvv_se_u32m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.i32.i64(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_xvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xvv_se_u32m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.i32.i64(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_xvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xvv_se_u32m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.i32.i64(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_xvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xvv_se_u32m8(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.nxv1i8.i8.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.nxv1i8.i8.i64(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vc_v_xvv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvv_u8mf8(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.nxv2i8.i8.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.nxv2i8.i8.i64(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vc_v_xvv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvv_u8mf4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.nxv4i8.i8.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.nxv4i8.i8.i64(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vc_v_xvv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvv_u8mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.nxv8i8.i8.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.nxv8i8.i8.i64(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vc_v_xvv_u8m1(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvv_u8m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.nxv16i8.i8.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.nxv16i8.i8.i64(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vc_v_xvv_u8m2(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvv_u8m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.nxv32i8.i8.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.nxv32i8.i8.i64(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_sf_vc_v_xvv_u8m4(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvv_u8m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.nxv64i8.i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.nxv64i8.i8.i64(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_sf_vc_v_xvv_u8m8(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvv_u8m8(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.nxv1i16.i16.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.nxv1i16.i16.i64(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_xvv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xvv_u16mf4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.nxv2i16.i16.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.nxv2i16.i16.i64(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_xvv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xvv_u16mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.nxv4i16.i16.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.nxv4i16.i16.i64(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_xvv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xvv_u16m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.nxv8i16.i16.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.nxv8i16.i16.i64(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_xvv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xvv_u16m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.nxv16i16.i16.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.nxv16i16.i16.i64(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_xvv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xvv_u16m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.nxv32i16.i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.nxv32i16.i16.i64(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_xvv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xvv_u16m8(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.nxv1i32.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.nxv1i32.i32.i64(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_xvv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xvv_u32mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.nxv2i32.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.nxv2i32.i32.i64(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_xvv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xvv_u32m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.nxv4i32.i32.i64(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_xvv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xvv_u32m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.nxv8i32.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.nxv8i32.i32.i64(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_xvv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xvv_u32m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.nxv16i32.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.nxv16i32.i32.i64(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_xvv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xvv_u32m8(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv1i8.i32.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv1i8.i32.i64(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) {
+  sf_vc_ivv_se_u8mf8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv2i8.i32.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv2i8.i32.i64(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) {
+  sf_vc_ivv_se_u8mf4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv4i8.i32.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv4i8.i32.i64(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) {
+  sf_vc_ivv_se_u8mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv8i8.i32.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv8i8.i32.i64(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
+  sf_vc_ivv_se_u8m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv16i8.i32.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv16i8.i32.i64(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) {
+  sf_vc_ivv_se_u8m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv32i8.i32.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv32i8.i32.i64(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) {
+  sf_vc_ivv_se_u8m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv64i8.i32.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv64i8.i32.i64(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
+  sf_vc_ivv_se_u8m8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv1i16.i32.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv1i16.i32.i64(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
+  sf_vc_ivv_se_u16mf4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv2i16.i32.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv2i16.i32.i64(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
+  sf_vc_ivv_se_u16mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv4i16.i32.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv4i16.i32.i64(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) {
+  sf_vc_ivv_se_u16m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv8i16.i32.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv8i16.i32.i64(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) {
+  sf_vc_ivv_se_u16m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv16i16.i32.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv16i16.i32.i64(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) {
+  sf_vc_ivv_se_u16m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv32i16.i32.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv32i16.i32.i64(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) {
+  sf_vc_ivv_se_u16m8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv1i32.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv1i32.i32.i64(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  sf_vc_ivv_se_u32mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv2i32.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv2i32.i32.i64(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  sf_vc_ivv_se_u32m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv4i32.i32.i64(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  sf_vc_ivv_se_u32m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv8i32.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv8i32.i32.i64(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  sf_vc_ivv_se_u32m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv16i32.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv16i32.i32.i64(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  sf_vc_ivv_se_u32m8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv1i64.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv1i64.i32.i64(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) {
+  sf_vc_ivv_se_u64m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv2i64.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv2i64.i32.i64(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) {
+  sf_vc_ivv_se_u64m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv4i64.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv4i64.i32.i64(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) {
+  sf_vc_ivv_se_u64m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv8i64.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.nxv8i64.i32.i64(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) {
+  sf_vc_ivv_se_u64m8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.i32.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.i32.i64(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vc_v_ivv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) {
+  return sf_vc_v_ivv_se_u8mf8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.i32.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.i32.i64(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vc_v_ivv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) {
+  return sf_vc_v_ivv_se_u8mf4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.i32.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.i32.i64(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vc_v_ivv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) {
+  return sf_vc_v_ivv_se_u8mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.i32.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.i32.i64(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vc_v_ivv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
+  return sf_vc_v_ivv_se_u8m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.i32.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.i32.i64(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vc_v_ivv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) {
+  return sf_vc_v_ivv_se_u8m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.i32.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.i32.i64(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_sf_vc_v_ivv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) {
+  return sf_vc_v_ivv_se_u8m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.i32.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.i32.i64(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_sf_vc_v_ivv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
+  return sf_vc_v_ivv_se_u8m8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.i32.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.i32.i64(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_ivv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
+  return sf_vc_v_ivv_se_u16mf4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.i32.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.i32.i64(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_ivv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
+  return sf_vc_v_ivv_se_u16mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.i32.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.i32.i64(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_ivv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) {
+  return sf_vc_v_ivv_se_u16m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.i32.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.i32.i64(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_ivv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) {
+  return sf_vc_v_ivv_se_u16m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.i32.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.i32.i64(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_ivv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) {
+  return sf_vc_v_ivv_se_u16m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.i32.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.i32.i64(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_ivv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) {
+  return sf_vc_v_ivv_se_u16m8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.i32.i64(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_ivv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return sf_vc_v_ivv_se_u32mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.i32.i64(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_ivv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return sf_vc_v_ivv_se_u32m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.i32.i64(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_ivv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return sf_vc_v_ivv_se_u32m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.i32.i64(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_ivv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return sf_vc_v_ivv_se_u32m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.i32.i64(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_ivv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return sf_vc_v_ivv_se_u32m8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.i32.i64(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_ivv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) {
+  return sf_vc_v_ivv_se_u64m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.i32.i64(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_ivv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) {
+  return sf_vc_v_ivv_se_u64m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.i32.i64(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_ivv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) {
+  return sf_vc_v_ivv_se_u64m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.i32.i64(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_ivv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) {
+  return sf_vc_v_ivv_se_u64m8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.nxv1i8.i32.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.nxv1i8.i32.i64(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vc_v_ivv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) {
+  return sf_vc_v_ivv_u8mf8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.nxv2i8.i32.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.nxv2i8.i32.i64(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vc_v_ivv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) {
+  return sf_vc_v_ivv_u8mf4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.nxv4i8.i32.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.nxv4i8.i32.i64(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vc_v_ivv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) {
+  return sf_vc_v_ivv_u8mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.nxv8i8.i32.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.nxv8i8.i32.i64(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vc_v_ivv_u8m1(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
+  return sf_vc_v_ivv_u8m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.nxv16i8.i32.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.nxv16i8.i32.i64(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vc_v_ivv_u8m2(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) {
+  return sf_vc_v_ivv_u8m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.nxv32i8.i32.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.nxv32i8.i32.i64(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_sf_vc_v_ivv_u8m4(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) {
+  return sf_vc_v_ivv_u8m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.nxv64i8.i32.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.nxv64i8.i32.i64(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_sf_vc_v_ivv_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
+  return sf_vc_v_ivv_u8m8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.nxv1i16.i32.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.nxv1i16.i32.i64(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_ivv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
+  return sf_vc_v_ivv_u16mf4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.nxv2i16.i32.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.nxv2i16.i32.i64(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_ivv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
+  return sf_vc_v_ivv_u16mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.nxv4i16.i32.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.nxv4i16.i32.i64(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_ivv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) {
+  return sf_vc_v_ivv_u16m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.nxv8i16.i32.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.nxv8i16.i32.i64(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_ivv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) {
+  return sf_vc_v_ivv_u16m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.nxv16i16.i32.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.nxv16i16.i32.i64(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_ivv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) {
+  return sf_vc_v_ivv_u16m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.nxv32i16.i32.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.nxv32i16.i32.i64(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_ivv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) {
+  return sf_vc_v_ivv_u16m8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.nxv1i32.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.nxv1i32.i32.i64(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_ivv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return sf_vc_v_ivv_u32mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.nxv2i32.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.nxv2i32.i32.i64(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_ivv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return sf_vc_v_ivv_u32m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.nxv4i32.i32.i64(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_ivv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return sf_vc_v_ivv_u32m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.nxv8i32.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.nxv8i32.i32.i64(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_ivv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return sf_vc_v_ivv_u32m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.nxv16i32.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.nxv16i32.i32.i64(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_ivv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return sf_vc_v_ivv_u32m8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.nxv1i64.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.nxv1i64.i32.i64(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_ivv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) {
+  return sf_vc_v_ivv_u64m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.nxv2i64.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.nxv2i64.i32.i64(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_ivv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) {
+  return sf_vc_v_ivv_u64m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.nxv4i64.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.nxv4i64.i32.i64(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_ivv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) {
+  return sf_vc_v_ivv_u64m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.nxv8i64.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.nxv8i64.i32.i64(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_ivv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) {
+  return sf_vc_v_ivv_u64m8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv1i16.f16.i32(i32 1, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv1i16.f16.i64(i32 1, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, _Float16 fs1, size_t vl) {
+  sf_vc_fvv_se_u16mf4(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv2i16.f16.i32(i32 1, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv2i16.f16.i64(i32 1, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, _Float16 fs1, size_t vl) {
+  sf_vc_fvv_se_u16mf2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv4i16.f16.i32(i32 1, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv4i16.f16.i64(i32 1, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, _Float16 fs1, size_t vl) {
+  sf_vc_fvv_se_u16m1(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv8i16.f16.i32(i32 1, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv8i16.f16.i64(i32 1, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, _Float16 fs1, size_t vl) {
+  sf_vc_fvv_se_u16m2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv16i16.f16.i32(i32 1, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv16i16.f16.i64(i32 1, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, _Float16 fs1, size_t vl) {
+  sf_vc_fvv_se_u16m4(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv32i16.f16.i32(i32 1, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv32i16.f16.i64(i32 1, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, _Float16 fs1, size_t vl) {
+  sf_vc_fvv_se_u16m8(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv1i32.f32.i32(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv1i32.f32.i64(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, float fs1, size_t vl) {
+  sf_vc_fvv_se_u32mf2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv2i32.f32.i32(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv2i32.f32.i64(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, float fs1, size_t vl) {
+  sf_vc_fvv_se_u32m1(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv4i32.f32.i32(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv4i32.f32.i64(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, float fs1, size_t vl) {
+  sf_vc_fvv_se_u32m2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv8i32.f32.i32(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv8i32.f32.i64(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, float fs1, size_t vl) {
+  sf_vc_fvv_se_u32m4(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv16i32.f32.i32(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv16i32.f32.i64(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, float fs1, size_t vl) {
+  sf_vc_fvv_se_u32m8(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv1i64.f64.i32(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv1i64.f64.i64(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, double fs1, size_t vl) {
+  sf_vc_fvv_se_u64m1(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv2i64.f64.i32(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv2i64.f64.i64(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, double fs1, size_t vl) {
+  sf_vc_fvv_se_u64m2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv4i64.f64.i32(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv4i64.f64.i64(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, double fs1, size_t vl) {
+  sf_vc_fvv_se_u64m4(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv8i64.f64.i32(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.nxv8i64.f64.i64(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, double fs1, size_t vl) {
+  sf_vc_fvv_se_u64m8(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.f16.i32(i32 1, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.f16.i64(i32 1, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_fvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fvv_se_u16mf4(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.f16.i32(i32 1, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.f16.i64(i32 1, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_fvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fvv_se_u16mf2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.f16.i32(i32 1, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.f16.i64(i32 1, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_fvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fvv_se_u16m1(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.f16.i32(i32 1, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.f16.i64(i32 1, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_fvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fvv_se_u16m2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.f16.i32(i32 1, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.f16.i64(i32 1, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_fvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fvv_se_u16m4(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.f16.i32(i32 1, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.f16.i64(i32 1, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_fvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fvv_se_u16m8(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.f32.i32(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.f32.i64(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_fvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fvv_se_u32mf2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.f32.i32(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.f32.i64(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_fvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fvv_se_u32m1(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.f32.i32(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.f32.i64(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_fvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fvv_se_u32m2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.f32.i32(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.f32.i64(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_fvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fvv_se_u32m4(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.f32.i32(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.f32.i64(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_fvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fvv_se_u32m8(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.f64.i32(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.f64.i64(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_fvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, double fs1, size_t vl) {
+  return sf_vc_v_fvv_se_u64m1(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.f64.i32(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.f64.i64(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_fvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, double fs1, size_t vl) {
+  return sf_vc_v_fvv_se_u64m2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.f64.i32(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.f64.i64(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_fvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, double fs1, size_t vl) {
+  return sf_vc_v_fvv_se_u64m4(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.f64.i32(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.f64.i64(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_fvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, double fs1, size_t vl) {
+  return sf_vc_v_fvv_se_u64m8(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.nxv1i16.f16.i32(i32 1, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.nxv1i16.f16.i64(i32 1, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_fvv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fvv_u16mf4(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.nxv2i16.f16.i32(i32 1, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.nxv2i16.f16.i64(i32 1, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_fvv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fvv_u16mf2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.nxv4i16.f16.i32(i32 1, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.nxv4i16.f16.i64(i32 1, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_fvv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fvv_u16m1(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.nxv8i16.f16.i32(i32 1, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.nxv8i16.f16.i64(i32 1, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_fvv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fvv_u16m2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.nxv16i16.f16.i32(i32 1, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.nxv16i16.f16.i64(i32 1, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_fvv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fvv_u16m4(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.nxv32i16.f16.i32(i32 1, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.nxv32i16.f16.i64(i32 1, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_fvv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fvv_u16m8(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.nxv1i32.f32.i32(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.nxv1i32.f32.i64(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_fvv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fvv_u32mf2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.nxv2i32.f32.i32(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.nxv2i32.f32.i64(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_fvv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fvv_u32m1(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.nxv4i32.f32.i32(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.nxv4i32.f32.i64(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_fvv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fvv_u32m2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.nxv8i32.f32.i32(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.nxv8i32.f32.i64(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_fvv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fvv_u32m4(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.nxv16i32.f32.i32(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.nxv16i32.f32.i64(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_fvv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fvv_u32m8(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.nxv1i64.f64.i32(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.nxv1i64.f64.i64(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_fvv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, double fs1, size_t vl) {
+  return sf_vc_v_fvv_u64m1(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.nxv2i64.f64.i32(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.nxv2i64.f64.i64(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_fvv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, double fs1, size_t vl) {
+  return sf_vc_v_fvv_u64m2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.nxv4i64.f64.i32(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.nxv4i64.f64.i64(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_fvv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, double fs1, size_t vl) {
+  return sf_vc_v_fvv_u64m4(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.nxv8i64.f64.i32(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.nxv8i64.f64.i64(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_fvv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, double fs1, size_t vl) {
+  return sf_vc_v_fvv_u64m8(p27, vd, vs2, fs1, vl);
+}

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-xvw.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-xvw.c
new file mode 100644
index 0000000000000..4688cc083ba43
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/xsfvcp-xvw.c
@@ -0,0 +1,2277 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv32 -target-feature +v -target-feature +zfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+const int p27_26 = 3;
+const int p27 = 1;
+const int simm5 = 10;
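+// Payload constants for the VCIX intrinsics. These are passed through as the
+// leading const-int operand of the llvm.riscv.sf.vc.* calls checked below
+// (i32 3 for p27_26, i32 1 for p27); simm5 is presumably the signed 5-bit
+// immediate payload used by the sf.vc.i test forms later in this file.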
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv1i16.nxv1i8.nxv1i8.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv1i16.nxv1i8.nxv1i8.i64(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  sf_vc_vvw_se_u8mf8(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv2i16.nxv2i8.nxv2i8.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv2i16.nxv2i8.nxv2i8.i64(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  sf_vc_vvw_se_u8mf4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv4i16.nxv4i8.nxv4i8.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv4i16.nxv4i8.nxv4i8.i64(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  sf_vc_vvw_se_u8mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv8i16.nxv8i8.nxv8i8.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv8i16.nxv8i8.nxv8i8.i64(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  sf_vc_vvw_se_u8m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv16i16.nxv16i8.nxv16i8.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv16i16.nxv16i8.nxv16i8.i64(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  sf_vc_vvw_se_u8m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv32i16.nxv32i8.nxv32i8.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv32i16.nxv32i8.nxv32i8.i64(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  sf_vc_vvw_se_u8m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv1i32.nxv1i16.nxv1i16.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv1i32.nxv1i16.nxv1i16.i64(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  sf_vc_vvw_se_u16mf4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv2i32.nxv2i16.nxv2i16.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv2i32.nxv2i16.nxv2i16.i64(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  sf_vc_vvw_se_u16mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv4i32.nxv4i16.nxv4i16.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv4i32.nxv4i16.nxv4i16.i64(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  sf_vc_vvw_se_u16m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv8i32.nxv8i16.nxv8i16.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv8i32.nxv8i16.nxv8i16.i64(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  sf_vc_vvw_se_u16m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv16i32.nxv16i16.nxv16i16.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv16i32.nxv16i16.nxv16i16.i64(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  sf_vc_vvw_se_u16m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv1i64.nxv1i32.nxv1i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv1i64.nxv1i32.nxv1i32.i64(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  sf_vc_vvw_se_u32mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv2i64.nxv2i32.nxv2i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv2i64.nxv2i32.nxv2i32.i64(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  sf_vc_vvw_se_u32m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv4i64.nxv4i32.nxv4i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv4i64.nxv4i32.nxv4i32.i64(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  sf_vc_vvw_se_u32m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv8i64.nxv8i32.nxv8i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvw.se.nxv8i64.nxv8i32.nxv8i32.i64(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_vvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  sf_vc_vvw_se_u32m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.nxv1i8.nxv1i8.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.nxv1i8.nxv1i8.i64(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_vvw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return sf_vc_v_vvw_se_u8mf8(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.nxv2i8.nxv2i8.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.nxv2i8.nxv2i8.i64(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_vvw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return sf_vc_v_vvw_se_u8mf4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.nxv4i8.nxv4i8.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.nxv4i8.nxv4i8.i64(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_vvw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return sf_vc_v_vvw_se_u8mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.nxv8i8.nxv8i8.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.nxv8i8.nxv8i8.i64(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_vvw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return sf_vc_v_vvw_se_u8m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.nxv16i8.nxv16i8.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.nxv16i8.nxv16i8.i64(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_vvw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return sf_vc_v_vvw_se_u8m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.nxv32i8.nxv32i8.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.nxv32i8.nxv32i8.i64(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_vvw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return sf_vc_v_vvw_se_u8m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.nxv1i16.nxv1i16.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.nxv1i16.nxv1i16.i64(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_vvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return sf_vc_v_vvw_se_u16mf4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.nxv2i16.nxv2i16.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.nxv2i16.nxv2i16.i64(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_vvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return sf_vc_v_vvw_se_u16mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.nxv4i16.nxv4i16.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.nxv4i16.nxv4i16.i64(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_vvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return sf_vc_v_vvw_se_u16m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.nxv8i16.nxv8i16.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.nxv8i16.nxv8i16.i64(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_vvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return sf_vc_v_vvw_se_u16m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.nxv16i16.nxv16i16.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.nxv16i16.nxv16i16.i64(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_vvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return sf_vc_v_vvw_se_u16m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.nxv1i32.nxv1i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.nxv1i32.nxv1i32.i64(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_vvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return sf_vc_v_vvw_se_u32mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.nxv2i32.nxv2i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.nxv2i32.nxv2i32.i64(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_vvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return sf_vc_v_vvw_se_u32m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.nxv4i32.nxv4i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.nxv4i32.nxv4i32.i64(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_vvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return sf_vc_v_vvw_se_u32m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.nxv8i32.nxv8i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.nxv8i32.nxv8i32.i64(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_vvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return sf_vc_v_vvw_se_u32m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.nxv1i16.nxv1i8.nxv1i8.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.nxv1i16.nxv1i8.nxv1i8.i64(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_vvw_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return sf_vc_v_vvw_u8mf8(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.nxv2i16.nxv2i8.nxv2i8.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.nxv2i16.nxv2i8.nxv2i8.i64(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_vvw_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return sf_vc_v_vvw_u8mf4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.nxv4i16.nxv4i8.nxv4i8.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.nxv4i16.nxv4i8.nxv4i8.i64(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_vvw_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return sf_vc_v_vvw_u8mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.nxv8i16.nxv8i8.nxv8i8.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.nxv8i16.nxv8i8.nxv8i8.i64(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_vvw_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return sf_vc_v_vvw_u8m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.nxv16i16.nxv16i8.nxv16i8.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.nxv16i16.nxv16i8.nxv16i8.i64(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_vvw_u8m2(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return sf_vc_v_vvw_u8m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.nxv32i16.nxv32i8.nxv32i8.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.nxv32i16.nxv32i8.nxv32i8.i64(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_vvw_u8m4(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return sf_vc_v_vvw_u8m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.nxv1i32.nxv1i16.nxv1i16.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.nxv1i32.nxv1i16.nxv1i16.i64(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_vvw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return sf_vc_v_vvw_u16mf4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.nxv2i32.nxv2i16.nxv2i16.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.nxv2i32.nxv2i16.nxv2i16.i64(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_vvw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return sf_vc_v_vvw_u16mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.nxv4i32.nxv4i16.nxv4i16.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.nxv4i32.nxv4i16.nxv4i16.i64(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_vvw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return sf_vc_v_vvw_u16m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.nxv8i32.nxv8i16.nxv8i16.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.nxv8i32.nxv8i16.nxv8i16.i64(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_vvw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return sf_vc_v_vvw_u16m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.nxv16i32.nxv16i16.nxv16i16.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.nxv16i32.nxv16i16.nxv16i16.i64(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_vvw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return sf_vc_v_vvw_u16m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.nxv1i64.nxv1i32.nxv1i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.nxv1i64.nxv1i32.nxv1i32.i64(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_vvw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return sf_vc_v_vvw_u32mf2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.nxv2i64.nxv2i32.nxv2i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.nxv2i64.nxv2i32.nxv2i32.i64(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_vvw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return sf_vc_v_vvw_u32m1(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.nxv4i64.nxv4i32.nxv4i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.nxv4i64.nxv4i32.nxv4i32.i64(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_vvw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return sf_vc_v_vvw_u32m2(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.nxv8i64.nxv8i32.nxv8i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.nxv8i64.nxv8i32.nxv8i32.i64(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_vvw_u32m4(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return sf_vc_v_vvw_u32m4(p27_26, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv1i16.nxv1i8.i8.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv1i16.nxv1i8.i8.i64(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  sf_vc_xvw_se_u8mf8(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv2i16.nxv2i8.i8.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv2i16.nxv2i8.i8.i64(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  sf_vc_xvw_se_u8mf4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv4i16.nxv4i8.i8.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv4i16.nxv4i8.i8.i64(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  sf_vc_xvw_se_u8mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv8i16.nxv8i8.i8.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv8i16.nxv8i8.i8.i64(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  sf_vc_xvw_se_u8m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv16i16.nxv16i8.i8.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv16i16.nxv16i8.i8.i64(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  sf_vc_xvw_se_u8m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv32i16.nxv32i8.i8.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv32i16.nxv32i8.i8.i64(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  sf_vc_xvw_se_u8m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv1i32.nxv1i16.i16.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv1i32.nxv1i16.i16.i64(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  sf_vc_xvw_se_u16mf4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv2i32.nxv2i16.i16.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv2i32.nxv2i16.i16.i64(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  sf_vc_xvw_se_u16mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv4i32.nxv4i16.i16.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv4i32.nxv4i16.i16.i64(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  sf_vc_xvw_se_u16m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv8i32.nxv8i16.i16.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv8i32.nxv8i16.i16.i64(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  sf_vc_xvw_se_u16m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv16i32.nxv16i16.i16.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv16i32.nxv16i16.i16.i64(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  sf_vc_xvw_se_u16m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv1i64.nxv1i32.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv1i64.nxv1i32.i32.i64(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  sf_vc_xvw_se_u32mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv2i64.nxv2i32.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv2i64.nxv2i32.i32.i64(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  sf_vc_xvw_se_u32m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv4i64.nxv4i32.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv4i64.nxv4i32.i32.i64(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  sf_vc_xvw_se_u32m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv8i64.nxv8i32.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvw.se.nxv8i64.nxv8i32.i32.i64(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_xvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  sf_vc_xvw_se_u32m4(p27_26, vd, vs2, rs1, vl);
+}
+
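+// Value-returning form: sf_vc_v_xvw_se returns the widened destination
+// vector instead of only updating it through vd.
+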
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.nxv1i8.i8.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.nxv1i8.i8.i64(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_xvw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvw_se_u8mf8(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.nxv2i8.i8.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.nxv2i8.i8.i64(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_xvw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvw_se_u8mf4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.nxv4i8.i8.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.nxv4i8.i8.i64(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_xvw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvw_se_u8mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.nxv8i8.i8.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.nxv8i8.i8.i64(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_xvw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvw_se_u8m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.nxv16i8.i8.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.nxv16i8.i8.i64(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_xvw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvw_se_u8m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.nxv32i8.i8.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.nxv32i8.i8.i64(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_xvw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvw_se_u8m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.nxv1i16.i16.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.nxv1i16.i16.i64(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_xvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xvw_se_u16mf4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.nxv2i16.i16.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.nxv2i16.i16.i64(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_xvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xvw_se_u16mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.nxv4i16.i16.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.nxv4i16.i16.i64(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_xvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xvw_se_u16m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.nxv8i16.i16.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.nxv8i16.i16.i64(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_xvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xvw_se_u16m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.nxv16i16.i16.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.nxv16i16.i16.i64(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_xvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xvw_se_u16m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.nxv1i32.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.nxv1i32.i32.i64(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_xvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xvw_se_u32mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.nxv2i32.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.nxv2i32.i32.i64(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_xvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xvw_se_u32m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.nxv4i32.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.nxv4i32.i32.i64(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_xvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xvw_se_u32m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.nxv8i32.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.nxv8i32.i32.i64(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_xvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xvw_se_u32m4(p27_26, vd, vs2, rs1, vl);
+}
+
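+// Same coverage for sf_vc_v_xvw, which lowers to the
+// llvm.riscv.sf.vc.v.xvw intrinsic without the ".se" suffix.
+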
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.nxv1i16.nxv1i8.i8.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.nxv1i16.nxv1i8.i8.i64(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_xvw_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvw_u8mf8(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.nxv2i16.nxv2i8.i8.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.nxv2i16.nxv2i8.i8.i64(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_xvw_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvw_u8mf4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.nxv4i16.nxv4i8.i8.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.nxv4i16.nxv4i8.i8.i64(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_xvw_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvw_u8mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.nxv8i16.nxv8i8.i8.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.nxv8i16.nxv8i8.i8.i64(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_xvw_u8m1(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvw_u8m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.nxv16i16.nxv16i8.i8.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.nxv16i16.nxv16i8.i8.i64(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_xvw_u8m2(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvw_u8m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.nxv32i16.nxv32i8.i8.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.nxv32i16.nxv32i8.i8.i64(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_xvw_u8m4(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return sf_vc_v_xvw_u8m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.nxv1i32.nxv1i16.i16.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.nxv1i32.nxv1i16.i16.i64(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_xvw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xvw_u16mf4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.nxv2i32.nxv2i16.i16.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.nxv2i32.nxv2i16.i16.i64(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_xvw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xvw_u16mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.nxv4i32.nxv4i16.i16.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.nxv4i32.nxv4i16.i16.i64(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_xvw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xvw_u16m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.nxv8i32.nxv8i16.i16.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.nxv8i32.nxv8i16.i16.i64(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_xvw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xvw_u16m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.nxv16i32.nxv16i16.i16.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.nxv16i32.nxv16i16.i16.i64(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_xvw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return sf_vc_v_xvw_u16m4(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.nxv1i64.nxv1i32.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.nxv1i64.nxv1i32.i32.i64(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_xvw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xvw_u32mf2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.nxv2i64.nxv2i32.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.nxv2i64.nxv2i32.i32.i64(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_xvw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xvw_u32m1(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.nxv4i64.nxv4i32.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.nxv4i64.nxv4i32.i32.i64(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_xvw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xvw_u32m2(p27_26, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.nxv8i64.nxv8i32.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.nxv8i64.nxv8i32.i32.i64(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_xvw_u32m4(vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return sf_vc_v_xvw_u32m4(p27_26, vd, vs2, rs1, vl);
+}
+
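+// sf_vc_ivw_se: the scalar rs1 operand is replaced by a 5-bit immediate
+// (simm5), which appears as the constant i32 10 in the generated calls.
+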
+// CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv1i16.nxv1i8.i32.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv1i16.nxv1i8.i32.i64(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) {
+  sf_vc_ivw_se_u8mf8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv2i16.nxv2i8.i32.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv2i16.nxv2i8.i32.i64(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) {
+  sf_vc_ivw_se_u8mf4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv4i16.nxv4i8.i32.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv4i16.nxv4i8.i32.i64(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) {
+  sf_vc_ivw_se_u8mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv8i16.nxv8i8.i32.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv8i16.nxv8i8.i32.i64(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, size_t vl) {
+  sf_vc_ivw_se_u8m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv16i16.nxv16i8.i32.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv16i16.nxv16i8.i32.i64(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, size_t vl) {
+  sf_vc_ivw_se_u8m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv32i16.nxv32i8.i32.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv32i16.nxv32i8.i32.i64(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, size_t vl) {
+  sf_vc_ivw_se_u8m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv1i32.nxv1i16.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv1i32.nxv1i16.i32.i64(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) {
+  sf_vc_ivw_se_u16mf4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv2i32.nxv2i16.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv2i32.nxv2i16.i32.i64(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) {
+  sf_vc_ivw_se_u16mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv4i32.nxv4i16.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv4i32.nxv4i16.i32.i64(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, size_t vl) {
+  sf_vc_ivw_se_u16m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv8i32.nxv8i16.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv8i32.nxv8i16.i32.i64(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, size_t vl) {
+  sf_vc_ivw_se_u16m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv16i32.nxv16i16.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv16i32.nxv16i16.i32.i64(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, size_t vl) {
+  sf_vc_ivw_se_u16m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv1i64.nxv1i32.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv1i64.nxv1i32.i32.i64(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  sf_vc_ivw_se_u32mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv2i64.nxv2i32.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv2i64.nxv2i32.i32.i64(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, size_t vl) {
+  sf_vc_ivw_se_u32m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv4i64.nxv4i32.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv4i64.nxv4i32.i32.i64(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, size_t vl) {
+  sf_vc_ivw_se_u32m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv8i64.nxv8i32.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivw.se.nxv8i64.nxv8i32.i32.i64(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_ivw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, size_t vl) {
+  sf_vc_ivw_se_u32m4(p27_26, vd, vs2, simm5, vl);
+}
+
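+// Value-returning immediate form: sf_vc_v_ivw_se.
+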
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.nxv1i8.i32.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.nxv1i8.i32.i64(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_ivw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) {
+  return sf_vc_v_ivw_se_u8mf8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.nxv2i8.i32.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.nxv2i8.i32.i64(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_ivw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) {
+  return sf_vc_v_ivw_se_u8mf4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv4i16.nxv4i8.i32.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv4i16.nxv4i8.i32.i64(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_ivw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) {
+  return sf_vc_v_ivw_se_u8mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.nxv8i8.i32.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.nxv8i8.i32.i64(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_ivw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, size_t vl) {
+  return sf_vc_v_ivw_se_u8m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.nxv16i8.i32.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.nxv16i8.i32.i64(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_ivw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, size_t vl) {
+  return sf_vc_v_ivw_se_u8m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.nxv32i8.i32.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.nxv32i8.i32.i64(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_ivw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, size_t vl) {
+  return sf_vc_v_ivw_se_u8m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.nxv1i16.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.nxv1i16.i32.i64(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_ivw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) {
+  return sf_vc_v_ivw_se_u16mf4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.nxv2i16.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.nxv2i16.i32.i64(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_ivw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) {
+  return sf_vc_v_ivw_se_u16mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.nxv4i16.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.nxv4i16.i32.i64(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_ivw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, size_t vl) {
+  return sf_vc_v_ivw_se_u16m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.nxv8i16.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.nxv8i16.i32.i64(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_ivw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, size_t vl) {
+  return sf_vc_v_ivw_se_u16m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.nxv16i16.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.nxv16i16.i32.i64(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_ivw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, size_t vl) {
+  return sf_vc_v_ivw_se_u16m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.nxv1i32.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.nxv1i32.i32.i64(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_ivw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return sf_vc_v_ivw_se_u32mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.nxv2i32.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.nxv2i32.i32.i64(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_ivw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return sf_vc_v_ivw_se_u32m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.nxv4i32.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.nxv4i32.i32.i64(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_ivw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return sf_vc_v_ivw_se_u32m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.nxv8i32.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.nxv8i32.i32.i64(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_ivw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return sf_vc_v_ivw_se_u32m4(p27_26, vd, vs2, simm5, vl);
+}
+
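+// And the corresponding non-".se" form, sf_vc_v_ivw.
+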
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.nxv1i16.nxv1i8.i32.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.nxv1i16.nxv1i8.i32.i64(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_sf_vc_v_ivw_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) {
+  return sf_vc_v_ivw_u8mf8(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.nxv2i16.nxv2i8.i32.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.nxv2i16.nxv2i8.i32.i64(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_sf_vc_v_ivw_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) {
+  return sf_vc_v_ivw_u8mf4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.nxv4i16.nxv4i8.i32.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.nxv4i16.nxv4i8.i32.i64(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_sf_vc_v_ivw_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) {
+  return sf_vc_v_ivw_u8mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.nxv8i16.nxv8i8.i32.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.nxv8i16.nxv8i8.i32.i64(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_sf_vc_v_ivw_u8m1(vuint16m2_t vd, vuint8m1_t vs2, size_t vl) {
+  return sf_vc_v_ivw_u8m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.nxv16i16.nxv16i8.i32.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.nxv16i16.nxv16i8.i32.i64(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_sf_vc_v_ivw_u8m2(vuint16m4_t vd, vuint8m2_t vs2, size_t vl) {
+  return sf_vc_v_ivw_u8m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.nxv32i16.nxv32i8.i32.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.nxv32i16.nxv32i8.i32.i64(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_sf_vc_v_ivw_u8m4(vuint16m8_t vd, vuint8m4_t vs2, size_t vl) {
+  return sf_vc_v_ivw_u8m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.nxv1i32.nxv1i16.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.nxv1i32.nxv1i16.i32.i64(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_ivw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) {
+  return sf_vc_v_ivw_u16mf4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.nxv2i32.nxv2i16.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.nxv2i32.nxv2i16.i32.i64(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_ivw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) {
+  return sf_vc_v_ivw_u16mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.nxv4i32.nxv4i16.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.nxv4i32.nxv4i16.i32.i64(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_ivw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, size_t vl) {
+  return sf_vc_v_ivw_u16m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.nxv8i32.nxv8i16.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.nxv8i32.nxv8i16.i32.i64(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_ivw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, size_t vl) {
+  return sf_vc_v_ivw_u16m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.nxv16i32.nxv16i16.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.nxv16i32.nxv16i16.i32.i64(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_ivw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, size_t vl) {
+  return sf_vc_v_ivw_u16m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.nxv1i64.nxv1i32.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.nxv1i64.nxv1i32.i32.i64(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_ivw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return sf_vc_v_ivw_u32mf2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.nxv2i64.nxv2i32.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.nxv2i64.nxv2i32.i32.i64(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_ivw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return sf_vc_v_ivw_u32m1(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.nxv4i64.nxv4i32.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.nxv4i64.nxv4i32.i32.i64(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_ivw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return sf_vc_v_ivw_u32m2(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.nxv8i64.nxv8i32.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.nxv8i64.nxv8i32.i32.i64(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_ivw_u32m4(vuint64m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return sf_vc_v_ivw_u32m4(p27_26, vd, vs2, simm5, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvw_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.nxv1i32.nxv1i16.f16.i32(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvw_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.nxv1i32.nxv1i16.f16.i64(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, _Float16 fs1, size_t vl) {
+  sf_vc_fvw_se_u16mf4(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvw_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.nxv2i32.nxv2i16.f16.i32(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvw_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.nxv2i32.nxv2i16.f16.i64(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, _Float16 fs1, size_t vl) {
+  sf_vc_fvw_se_u16mf2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvw_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.nxv4i32.nxv4i16.f16.i32(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvw_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.nxv4i32.nxv4i16.f16.i64(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, _Float16 fs1, size_t vl) {
+  sf_vc_fvw_se_u16m1(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvw_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.nxv8i32.nxv8i16.f16.i32(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvw_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.nxv8i32.nxv8i16.f16.i64(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, _Float16 fs1, size_t vl) {
+  sf_vc_fvw_se_u16m2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvw_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.nxv16i32.nxv16i16.f16.i32(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvw_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.nxv16i32.nxv16i16.f16.i64(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, _Float16 fs1, size_t vl) {
+  sf_vc_fvw_se_u16m4(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvw_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.nxv1i64.nxv1i32.f32.i32(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvw_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.nxv1i64.nxv1i32.f32.i64(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, float fs1, size_t vl) {
+  sf_vc_fvw_se_u32mf2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvw_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.nxv2i64.nxv2i32.f32.i32(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvw_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.nxv2i64.nxv2i32.f32.i64(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, float fs1, size_t vl) {
+  sf_vc_fvw_se_u32m1(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvw_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.nxv4i64.nxv4i32.f32.i32(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvw_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.nxv4i64.nxv4i32.f32.i64(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, float fs1, size_t vl) {
+  sf_vc_fvw_se_u32m2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_fvw_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.nxv8i64.nxv8i32.f32.i32(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_sf_vc_fvw_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvw.se.nxv8i64.nxv8i32.f32.i64(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_sf_vc_fvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, float fs1, size_t vl) {
+  sf_vc_fvw_se_u32m4(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.nxv1i16.f16.i32(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.nxv1i16.f16.i64(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_fvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fvw_se_u16mf4(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.nxv2i16.f16.i32(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.nxv2i16.f16.i64(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_fvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fvw_se_u16mf2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.nxv4i16.f16.i32(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.nxv4i16.f16.i64(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_fvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fvw_se_u16m1(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.nxv8i16.f16.i32(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.nxv8i16.f16.i64(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_fvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fvw_se_u16m2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.nxv16i16.f16.i32(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.nxv16i16.f16.i64(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_fvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fvw_se_u16m4(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.nxv1i32.f32.i32(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.nxv1i32.f32.i64(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_fvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fvw_se_u32mf2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv2i64.nxv2i32.f32.i32(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv2i64.nxv2i32.f32.i64(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_fvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fvw_se_u32m1(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.nxv4i32.f32.i32(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.nxv4i32.f32.i64(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_fvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fvw_se_u32m2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.nxv8i32.f32.i32(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.nxv8i32.f32.i64(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_fvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fvw_se_u32m4(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.nxv1i32.nxv1i16.f16.i32(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.nxv1i32.nxv1i16.f16.i64(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_sf_vc_v_fvw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fvw_u16mf4(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.nxv2i32.nxv2i16.f16.i32(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.nxv2i32.nxv2i16.f16.i64(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_sf_vc_v_fvw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fvw_u16mf2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.nxv4i32.nxv4i16.f16.i32(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.nxv4i32.nxv4i16.f16.i64(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_sf_vc_v_fvw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fvw_u16m1(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.nxv8i32.nxv8i16.f16.i32(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.nxv8i32.nxv8i16.f16.i64(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_sf_vc_v_fvw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fvw_u16m2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.nxv16i32.nxv16i16.f16.i32(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.nxv16i32.nxv16i16.f16.i64(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_sf_vc_v_fvw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, _Float16 fs1, size_t vl) {
+  return sf_vc_v_fvw_u16m4(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.nxv1i64.nxv1i32.f32.i32(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.nxv1i64.nxv1i32.f32.i64(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_sf_vc_v_fvw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fvw_u32mf2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.nxv2i64.nxv2i32.f32.i32(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.nxv2i64.nxv2i32.f32.i64(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_sf_vc_v_fvw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fvw_u32m1(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.nxv4i64.nxv4i32.f32.i32(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.nxv4i64.nxv4i32.f32.i64(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_sf_vc_v_fvw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fvw_u32m2(p27, vd, vs2, fs1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.nxv8i32.f32.i32(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.nxv8i32.f32.i64(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_sf_vc_v_fvw_u32m4(vuint64m8_t vd, vuint32m4_t vs2, float fs1, size_t vl) {
+  return sf_vc_v_fvw_u32m4(p27, vd, vs2, fs1, vl);
+}