[clang] [llvm] [RISCV][SiFive] Reduce intrinsics of SiFive VCIX extension (PR #79407)

Brandon Wu via cfe-commits cfe-commits at lists.llvm.org
Wed Jan 24 21:26:22 PST 2024


https://github.com/4vtomat created https://github.com/llvm/llvm-project/pull/79407

This patch models LMUL and SEW as explicit inputs to sf_vc_x_se and sf_vc_i_se,
removing 42 intrinsics from the lookup table.
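
To make the effect on user code concrete, here is a small usage sketch (not
part of the patch): the type-suffixed spellings such as
__riscv_sf_vc_x_se_u8m1 become macros in sifive_vector.h that forward SEW and
log2(LMUL) as extra constant arguments to a single generic builtin, so both
calls below lower to the same @llvm.riscv.sf.vc.x.se intrinsic. The -march
string in the comment is only an assumption for illustration.

// Usage sketch; assumes a toolchain with this patch applied and a target
// such as -march=rv64gcv_xsfvcp (illustrative only).
#include <sifive_vector.h>

void vcix_demo(uint8_t rs1, size_t vl) {
  // Old spelling: now a header macro that expands to the generic form
  // below with SEW = 8 and log2(LMUL) = 0 (i.e. LMUL = m1).
  __riscv_sf_vc_x_se_u8m1(3, 31, 31, rs1, vl);

  // Generic form: SEW and log2(LMUL) are passed explicitly as constant
  // operands, matching the macro expansion added in sifive_vector.h.
  __riscv_sf_vc_x_se(3, 31, 31, rs1, 8, 0, vl);
}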


From 8aac072c1b03a37b0a43ceabe2ee80be22f4b974 Mon Sep 17 00:00:00 2001
From: Brandon Wu <brandon.wu at sifive.com>
Date: Wed, 24 Jan 2024 19:12:00 -0800
Subject: [PATCH] [RISCV][SiFive] Reduce intrinsics of SiFive VCIX extension

This patch models LMUL and SEW as explicit inputs to sf_vc_x_se and sf_vc_i_se,
removing 42 intrinsics from the lookup table.
---
 .../clang/Basic/riscv_sifive_vector.td        |  29 +--
 clang/lib/Headers/sifive_vector.h             | 102 ++++++++++
 clang/lib/Sema/SemaChecking.cpp               |  56 +-----
 .../non-policy/non-overloaded/xsfvcp-x-rv64.c |  12 +-
 .../non-policy/non-overloaded/xsfvcp-x.c      | 186 ++++++++++--------
 .../xsfvcp-index-out-of-range.c               |  14 +-
 llvm/include/llvm/IR/IntrinsicsRISCVXsf.td    |  44 +++--
 llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp   |  64 ++++++
 llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h     |   2 +
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |  88 ---------
 llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td    |  14 --
 .../RISCV/rvv/fixed-vectors-xsfvcp-x.ll       | 160 +++++++--------
 llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll       | 160 +++++++--------
 13 files changed, 490 insertions(+), 441 deletions(-)

diff --git a/clang/include/clang/Basic/riscv_sifive_vector.td b/clang/include/clang/Basic/riscv_sifive_vector.td
index ef5114d6105e48..0761712a40fbaf 100644
--- a/clang/include/clang/Basic/riscv_sifive_vector.td
+++ b/clang/include/clang/Basic/riscv_sifive_vector.td
@@ -46,34 +46,23 @@ multiclass VCIXBuiltinSet<string name, string IR_name, string suffix,
 }
 
 multiclass RVVVCIXBuiltinSet<list<string> range, string prototype,
-                             list<int> intrinsic_types, bit UseGPR> {
+                             list<int> intrinsic_types, bit UseGPR,
+                             string suffix = "Uv"> {
   foreach r = range in
     let RequiredFeatures = !if(!and(UseGPR, !eq(r, "l")),
                                ["Xsfvcp", "RV64"], ["Xsfvcp"]) in
-      defm : VCIXBuiltinSet<NAME, NAME, "Uv", prototype, r, intrinsic_types>;
+      defm : VCIXBuiltinSet<NAME, NAME, suffix, prototype, r, intrinsic_types>;
 }
 
-multiclass RVVVCIXBuiltinSetWVType<list<string> range, string prototype,
-                             list<int> intrinsic_types, bit UseGPR> {
-  foreach r = range in
-    let RequiredFeatures = !if(!and(UseGPR, !eq(r, "l")),
-                               ["Xsfvcp", "RV64"], ["Xsfvcp"]) in
-      // These intrinsics don't have any vector types in the output and inputs,
-      // but we still need to add vetvli for them. So we encode different
-      // VTYPE into the intrinsic names, and then will know which vsetvli is
-      // correct.
-      foreach s = VCIXSuffix<r>.suffix in
-        // Since we already encode the Vtype into the name, so just set
-        // Log2LMUL to zero.  Otherwise the RISCVVEmitter will expand
-        // lots of redundant intrinsic but have same names.
-        let Log2LMUL = [0] in
-          def : VCIXBuiltinSet<NAME # "_u" # s, NAME # "_e" # s,
-                               "", prototype, r, intrinsic_types>;
+multiclass RVVVCIXBuiltinSetWOSuffix<list<string> range, string prototype,
+                                     list<int> intrinsic_types, bit UseGPR> {
+  let Log2LMUL = [0] in
+  defm NAME : RVVVCIXBuiltinSet<range, prototype, intrinsic_types, UseGPR, "">;
 }
 
 let SupportOverloading = false in {
-  defm sf_vc_x_se  : RVVVCIXBuiltinSetWVType<["c", "s", "i", "l"], "0KzKzKzUe", [0, 3], UseGPR=1>;
-  defm sf_vc_i_se  : RVVVCIXBuiltinSetWVType<["c", "s", "i", "l"], "0KzKzKzKz", [2, 3], UseGPR=0>;
+  defm sf_vc_x  : RVVVCIXBuiltinSetWOSuffix<["i"], "0KzKzKzUeKzKz", [0, 3], UseGPR=1>;
+  defm sf_vc_i  : RVVVCIXBuiltinSetWOSuffix<["i"], "0KzKzKzKzKzKz", [2, 3], UseGPR=0>;
   defm sf_vc_xv    : RVVVCIXBuiltinSet<["csi", "l"], "0KzKzUvUe",  [0, 2, 3],  UseGPR=1>;
   defm sf_vc_iv    : RVVVCIXBuiltinSet<["csi", "l"], "0KzKzUvKz",  [0, 2, 3],  UseGPR=0>;
   defm sf_vc_vv    : RVVVCIXBuiltinSet<["csi", "l"], "0KzKzUvUv",  [0, 2, 3],  UseGPR=0>;
diff --git a/clang/lib/Headers/sifive_vector.h b/clang/lib/Headers/sifive_vector.h
index 42d7224db61454..2dea69947754ac 100644
--- a/clang/lib/Headers/sifive_vector.h
+++ b/clang/lib/Headers/sifive_vector.h
@@ -13,4 +13,106 @@
 
 #pragma clang riscv intrinsic sifive_vector
 
+#define __riscv_sf_vc_x_se_u8mf4(p27_26, p24_20, p11_7, rs1, vl)               \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, rs1, 8, 6, vl)
+#define __riscv_sf_vc_x_se_u8mf2(p27_26, p24_20, p11_7, rs1, vl)               \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, rs1, 8, 7, vl)
+#define __riscv_sf_vc_x_se_u8m1(p27_26, p24_20, p11_7, rs1, vl)                \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, rs1, 8, 0, vl)
+#define __riscv_sf_vc_x_se_u8m2(p27_26, p24_20, p11_7, rs1, vl)                \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, rs1, 8, 1, vl)
+#define __riscv_sf_vc_x_se_u8m4(p27_26, p24_20, p11_7, rs1, vl)                \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, rs1, 8, 2, vl)
+#define __riscv_sf_vc_x_se_u8m8(p27_26, p24_20, p11_7, rs1, vl)                \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, rs1, 8, 3, vl)
+
+#define __riscv_sf_vc_x_se_u16mf2(p27_26, p24_20, p11_7, rs1, vl)              \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, rs1, 16, 7, vl)
+#define __riscv_sf_vc_x_se_u16m1(p27_26, p24_20, p11_7, rs1, vl)               \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, rs1, 16, 0, vl)
+#define __riscv_sf_vc_x_se_u16m2(p27_26, p24_20, p11_7, rs1, vl)               \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, rs1, 16, 1, vl)
+#define __riscv_sf_vc_x_se_u16m4(p27_26, p24_20, p11_7, rs1, vl)               \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, rs1, 16, 2, vl)
+#define __riscv_sf_vc_x_se_u16m8(p27_26, p24_20, p11_7, rs1, vl)               \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, rs1, 16, 3, vl)
+
+#define __riscv_sf_vc_x_se_u32m1(p27_26, p24_20, p11_7, rs1, vl)               \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, rs1, 32, 0, vl)
+#define __riscv_sf_vc_x_se_u32m2(p27_26, p24_20, p11_7, rs1, vl)               \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, rs1, 32, 1, vl)
+#define __riscv_sf_vc_x_se_u32m4(p27_26, p24_20, p11_7, rs1, vl)               \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, rs1, 32, 2, vl)
+#define __riscv_sf_vc_x_se_u32m8(p27_26, p24_20, p11_7, rs1, vl)               \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, rs1, 32, 3, vl)
+
+#define __riscv_sf_vc_i_se_u8mf4(p27_26, p24_20, p11_7, simm5, vl)             \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 7, vl)
+#define __riscv_sf_vc_i_se_u8mf2(p27_26, p24_20, p11_7, simm5, vl)             \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 6, vl)
+#define __riscv_sf_vc_i_se_u8m1(p27_26, p24_20, p11_7, simm5, vl)              \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 0, vl)
+#define __riscv_sf_vc_i_se_u8m2(p27_26, p24_20, p11_7, simm5, vl)              \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 1, vl)
+#define __riscv_sf_vc_i_se_u8m4(p27_26, p24_20, p11_7, simm5, vl)              \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 2, vl)
+#define __riscv_sf_vc_i_se_u8m8(p27_26, p24_20, p11_7, simm5, vl)              \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 3, vl)
+
+#define __riscv_sf_vc_i_se_u16mf2(p27_26, p24_20, p11_7, simm5, vl)            \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 7, vl)
+#define __riscv_sf_vc_i_se_u16m1(p27_26, p24_20, p11_7, simm5, vl)             \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 0, vl)
+#define __riscv_sf_vc_i_se_u16m2(p27_26, p24_20, p11_7, simm5, vl)             \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 1, vl)
+#define __riscv_sf_vc_i_se_u16m4(p27_26, p24_20, p11_7, simm5, vl)             \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 2, vl)
+#define __riscv_sf_vc_i_se_u16m8(p27_26, p24_20, p11_7, simm5, vl)             \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 3, vl)
+
+#define __riscv_sf_vc_i_se_u32m1(p27_26, p24_20, p11_7, simm5, vl)             \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 32, 0, vl)
+#define __riscv_sf_vc_i_se_u32m2(p27_26, p24_20, p11_7, simm5, vl)             \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 32, 1, vl)
+#define __riscv_sf_vc_i_se_u32m4(p27_26, p24_20, p11_7, simm5, vl)             \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 32, 2, vl)
+#define __riscv_sf_vc_i_se_u32m8(p27_26, p24_20, p11_7, simm5, vl)             \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 32, 3, vl)
+
+#if __riscv_v_elen >= 64
+#define __riscv_sf_vc_x_se_u8mf8(p27_26, p24_20, p11_7, rs1, vl)               \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, rs1, 8, 5, vl)
+#define __riscv_sf_vc_x_se_u16mf4(p27_26, p24_20, p11_7, rs1, vl)              \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, rs1, 16, 6, vl)
+#define __riscv_sf_vc_x_se_u32mf2(p27_26, p24_20, p11_7, rs1, vl)              \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, rs1, 32, 7, vl)
+
+#define __riscv_sf_vc_i_se_u8mf8(p27_26, p24_20, p11_7, simm5, vl)             \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 5, vl)
+#define __riscv_sf_vc_i_se_u16mf4(p27_26, p24_20, p11_7, simm5, vl)            \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 6, vl)
+#define __riscv_sf_vc_i_se_u32mf2(p27_26, p24_20, p11_7, simm5, vl)            \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 32, 7, vl)
+
+#define __riscv_sf_vc_i_se_u64m1(p27_26, p24_20, p11_7, simm5, vl)             \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 64, 0, vl)
+#define __riscv_sf_vc_i_se_u64m2(p27_26, p24_20, p11_7, simm5, vl)             \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 64, 1, vl)
+#define __riscv_sf_vc_i_se_u64m4(p27_26, p24_20, p11_7, simm5, vl)             \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 64, 2, vl)
+#define __riscv_sf_vc_i_se_u64m8(p27_26, p24_20, p11_7, simm5, vl)             \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 64, 3, vl)
+
+#if __riscv_xlen >= 64
+#define __riscv_sf_vc_x_se_u64m1(p27_26, p24_20, p11_7, rs1, vl)               \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, rs1, 64, 0, vl)
+#define __riscv_sf_vc_x_se_u64m2(p27_26, p24_20, p11_7, rs1, vl)               \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, rs1, 64, 1, vl)
+#define __riscv_sf_vc_x_se_u64m4(p27_26, p24_20, p11_7, rs1, vl)               \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, rs1, 64, 2, vl)
+#define __riscv_sf_vc_x_se_u64m8(p27_26, p24_20, p11_7, rs1, vl)               \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, rs1, 64, 3, vl)
+#endif
+#endif
+
 #endif //_SIFIVE_VECTOR_H_
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 4d280f25cc04c2..ad8a1235f9d22b 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -5441,33 +5441,13 @@ bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
            CheckInvalidVLENandLMUL(TI, TheCall, *this, Op3Type, ElemSize * 4);
   }
 
-  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf8:
-  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf4:
-  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf2:
-  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m1:
-  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m2:
-  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m4:
-  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m8:
-  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16mf4:
-  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16mf2:
-  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m1:
-  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m2:
-  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m4:
-  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m8:
-  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32mf2:
-  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m1:
-  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m2:
-  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m4:
-  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m8:
-  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m1:
-  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m2:
-  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m4:
-  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m8:
-    // bit_27_26, bit_24_20, bit_11_7, simm5
+  case RISCVVector::BI__builtin_rvv_sf_vc_i_se:
+    // bit_27_26, bit_24_20, bit_11_7, simm5, sew, log2lmul
     return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
            SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
            SemaBuiltinConstantArgRange(TheCall, 2, 0, 31) ||
-           SemaBuiltinConstantArgRange(TheCall, 3, -16, 15);
+           SemaBuiltinConstantArgRange(TheCall, 3, -16, 15) ||
+           CheckRISCVLMUL(TheCall, 5);
   case RISCVVector::BI__builtin_rvv_sf_vc_iv_se:
     // bit_27_26, bit_11_7, vs2, simm5
     return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
@@ -5493,32 +5473,12 @@ bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
     // bit_27_26, vd, vs2, simm5
     return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
            SemaBuiltinConstantArgRange(TheCall, 3, -16, 15);
-  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf8:
-  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf4:
-  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf2:
-  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m1:
-  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m2:
-  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m4:
-  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m8:
-  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16mf4:
-  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16mf2:
-  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m1:
-  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m2:
-  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m4:
-  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m8:
-  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32mf2:
-  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m1:
-  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m2:
-  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m4:
-  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m8:
-  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m1:
-  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m2:
-  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m4:
-  case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m8:
-    // bit_27_26, bit_24_20, bit_11_7, xs1
+  case RISCVVector::BI__builtin_rvv_sf_vc_x_se:
+    // bit_27_26, bit_24_20, bit_11_7, xs1, sew, log2lmul
     return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
            SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
-           SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
+           SemaBuiltinConstantArgRange(TheCall, 2, 0, 31) ||
+           CheckRISCVLMUL(TheCall, 5);
   case RISCVVector::BI__builtin_rvv_sf_vc_xv_se:
   case RISCVVector::BI__builtin_rvv_sf_vc_vv_se:
     // bit_27_26, bit_11_7, vs2, xs1/vs1
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-x-rv64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-x-rv64.c
index fe6972dc7942f1..742b87f7b0d3a0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-x-rv64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-x-rv64.c
@@ -10,7 +10,8 @@
 
 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e64m1.i64.i64.i64(i64 3, i64 31, i64 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[CONV:%.*]] = trunc i64 [[RS1:%.*]] to i32
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[CONV]], i64 64, i64 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_x_se_u64m1(uint64_t rs1, size_t vl) {
@@ -19,7 +20,8 @@ void test_sf_vc_x_se_u64m1(uint64_t rs1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e64m2.i64.i64.i64(i64 3, i64 31, i64 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[CONV:%.*]] = trunc i64 [[RS1:%.*]] to i32
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[CONV]], i64 64, i64 1, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_x_se_u64m2(uint64_t rs1, size_t vl) {
@@ -28,7 +30,8 @@ void test_sf_vc_x_se_u64m2(uint64_t rs1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e64m4.i64.i64.i64(i64 3, i64 31, i64 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[CONV:%.*]] = trunc i64 [[RS1:%.*]] to i32
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[CONV]], i64 64, i64 2, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_x_se_u64m4(uint64_t rs1, size_t vl) {
@@ -37,7 +40,8 @@ void test_sf_vc_x_se_u64m4(uint64_t rs1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e64m8.i64.i64.i64(i64 3, i64 31, i64 31, i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[CONV:%.*]] = trunc i64 [[RS1:%.*]] to i32
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[CONV]], i64 64, i64 3, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_x_se_u64m8(uint64_t rs1, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-x.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-x.c
index bac7e019c35b19..25703154b5ec2c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-x.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-x.c
@@ -12,12 +12,14 @@
 
 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u8mf8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8mf8.i32.i8.i32(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[CONV:%.*]] = zext i8 [[RS1:%.*]] to i32
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[CONV]], i32 8, i32 5, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8mf8.i64.i8.i64(i64 3, i64 31, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[CONV:%.*]] = zext i8 [[RS1:%.*]] to i32
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[CONV]], i64 8, i64 5, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_x_se_u8mf8(uint8_t rs1, size_t vl) {
@@ -26,12 +28,14 @@ void test_sf_vc_x_se_u8mf8(uint8_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u8mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8mf4.i32.i8.i32(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[CONV:%.*]] = zext i8 [[RS1:%.*]] to i32
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[CONV]], i32 8, i32 6, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8mf4.i64.i8.i64(i64 3, i64 31, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[CONV:%.*]] = zext i8 [[RS1:%.*]] to i32
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[CONV]], i64 8, i64 6, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_x_se_u8mf4(uint8_t rs1, size_t vl) {
@@ -40,12 +44,14 @@ void test_sf_vc_x_se_u8mf4(uint8_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u8mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8mf2.i32.i8.i32(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[CONV:%.*]] = zext i8 [[RS1:%.*]] to i32
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[CONV]], i32 8, i32 7, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8mf2.i64.i8.i64(i64 3, i64 31, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[CONV:%.*]] = zext i8 [[RS1:%.*]] to i32
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[CONV]], i64 8, i64 7, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_x_se_u8mf2(uint8_t rs1, size_t vl) {
@@ -54,12 +60,14 @@ void test_sf_vc_x_se_u8mf2(uint8_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8m1.i32.i8.i32(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[CONV:%.*]] = zext i8 [[RS1:%.*]] to i32
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[CONV]], i32 8, i32 0, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8m1.i64.i8.i64(i64 3, i64 31, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[CONV:%.*]] = zext i8 [[RS1:%.*]] to i32
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[CONV]], i64 8, i64 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_x_se_u8m1(uint8_t rs1, size_t vl) {
@@ -68,12 +76,14 @@ void test_sf_vc_x_se_u8m1(uint8_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u8m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8m2.i32.i8.i32(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[CONV:%.*]] = zext i8 [[RS1:%.*]] to i32
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[CONV]], i32 8, i32 1, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8m2.i64.i8.i64(i64 3, i64 31, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[CONV:%.*]] = zext i8 [[RS1:%.*]] to i32
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[CONV]], i64 8, i64 1, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_x_se_u8m2(uint8_t rs1, size_t vl) {
@@ -82,12 +92,14 @@ void test_sf_vc_x_se_u8m2(uint8_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u8m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8m4.i32.i8.i32(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[CONV:%.*]] = zext i8 [[RS1:%.*]] to i32
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[CONV]], i32 8, i32 2, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8m4.i64.i8.i64(i64 3, i64 31, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[CONV:%.*]] = zext i8 [[RS1:%.*]] to i32
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[CONV]], i64 8, i64 2, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_x_se_u8m4(uint8_t rs1, size_t vl) {
@@ -96,12 +108,14 @@ void test_sf_vc_x_se_u8m4(uint8_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u8m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8m8.i32.i8.i32(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[CONV:%.*]] = zext i8 [[RS1:%.*]] to i32
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[CONV]], i32 8, i32 3, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e8m8.i64.i8.i64(i64 3, i64 31, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[CONV:%.*]] = zext i8 [[RS1:%.*]] to i32
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[CONV]], i64 8, i64 3, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_x_se_u8m8(uint8_t rs1, size_t vl) {
@@ -110,12 +124,14 @@ void test_sf_vc_x_se_u8m8(uint8_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16mf4.i32.i16.i32(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[CONV:%.*]] = zext i16 [[RS1:%.*]] to i32
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[CONV]], i32 16, i32 6, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16mf4.i64.i16.i64(i64 3, i64 31, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[CONV:%.*]] = zext i16 [[RS1:%.*]] to i32
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[CONV]], i64 16, i64 6, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_x_se_u16mf4(uint16_t rs1, size_t vl) {
@@ -124,12 +140,14 @@ void test_sf_vc_x_se_u16mf4(uint16_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16mf2.i32.i16.i32(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[CONV:%.*]] = zext i16 [[RS1:%.*]] to i32
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[CONV]], i32 16, i32 7, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16mf2.i64.i16.i64(i64 3, i64 31, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[CONV:%.*]] = zext i16 [[RS1:%.*]] to i32
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[CONV]], i64 16, i64 7, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_x_se_u16mf2(uint16_t rs1, size_t vl) {
@@ -138,12 +156,14 @@ void test_sf_vc_x_se_u16mf2(uint16_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16m1.i32.i16.i32(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[CONV:%.*]] = zext i16 [[RS1:%.*]] to i32
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[CONV]], i32 16, i32 0, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16m1.i64.i16.i64(i64 3, i64 31, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[CONV:%.*]] = zext i16 [[RS1:%.*]] to i32
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[CONV]], i64 16, i64 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_x_se_u16m1(uint16_t rs1, size_t vl) {
@@ -152,12 +172,14 @@ void test_sf_vc_x_se_u16m1(uint16_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16m2.i32.i16.i32(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[CONV:%.*]] = zext i16 [[RS1:%.*]] to i32
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[CONV]], i32 16, i32 1, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16m2.i64.i16.i64(i64 3, i64 31, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[CONV:%.*]] = zext i16 [[RS1:%.*]] to i32
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[CONV]], i64 16, i64 1, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_x_se_u16m2(uint16_t rs1, size_t vl) {
@@ -166,12 +188,14 @@ void test_sf_vc_x_se_u16m2(uint16_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16m4.i32.i16.i32(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[CONV:%.*]] = zext i16 [[RS1:%.*]] to i32
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[CONV]], i32 16, i32 2, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16m4.i64.i16.i64(i64 3, i64 31, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[CONV:%.*]] = zext i16 [[RS1:%.*]] to i32
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[CONV]], i64 16, i64 2, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_x_se_u16m4(uint16_t rs1, size_t vl) {
@@ -180,12 +204,14 @@ void test_sf_vc_x_se_u16m4(uint16_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u16m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16m8.i32.i16.i32(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[CONV:%.*]] = zext i16 [[RS1:%.*]] to i32
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[CONV]], i32 16, i32 3, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e16m8.i64.i16.i64(i64 3, i64 31, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[CONV:%.*]] = zext i16 [[RS1:%.*]] to i32
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[CONV]], i64 16, i64 3, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_x_se_u16m8(uint16_t rs1, size_t vl) {
@@ -194,12 +220,12 @@ void test_sf_vc_x_se_u16m8(uint16_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32mf2.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i32 32, i32 7, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32mf2.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[RS1:%.*]], i64 32, i64 7, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_x_se_u32mf2(uint32_t rs1, size_t vl) {
@@ -208,12 +234,12 @@ void test_sf_vc_x_se_u32mf2(uint32_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32m1.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i32 32, i32 0, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32m1.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[RS1:%.*]], i64 32, i64 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_x_se_u32m1(uint32_t rs1, size_t vl) {
@@ -222,12 +248,12 @@ void test_sf_vc_x_se_u32m1(uint32_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32m2.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i32 32, i32 1, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32m2.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[RS1:%.*]], i64 32, i64 1, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_x_se_u32m2(uint32_t rs1, size_t vl) {
@@ -236,12 +262,12 @@ void test_sf_vc_x_se_u32m2(uint32_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32m4.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i32 32, i32 2, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32m4.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[RS1:%.*]], i64 32, i64 2, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_x_se_u32m4(uint32_t rs1, size_t vl) {
@@ -250,12 +276,12 @@ void test_sf_vc_x_se_u32m4(uint32_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u32m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32m8.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.x.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i32 32, i32 3, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.e32m8.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.x.se.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[RS1:%.*]], i64 32, i64 3, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_x_se_u32m8(uint32_t rs1, size_t vl) {
@@ -768,12 +794,12 @@ vuint32m8_t test_sf_vc_v_x_u32m8(uint32_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u8mf8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8mf8.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 8, i32 5, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8mf8.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 8, i64 5, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_i_se_u8mf8(size_t vl) {
@@ -782,12 +808,12 @@ void test_sf_vc_i_se_u8mf8(size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u8mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8mf4.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 8, i32 7, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8mf4.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 8, i64 7, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_i_se_u8mf4(size_t vl) {
@@ -796,12 +822,12 @@ void test_sf_vc_i_se_u8mf4(size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u8mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8mf2.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 8, i32 6, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8mf2.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 8, i64 6, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_i_se_u8mf2(size_t vl) {
@@ -810,12 +836,12 @@ void test_sf_vc_i_se_u8mf2(size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8m1.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 8, i32 0, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8m1.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 8, i64 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_i_se_u8m1(size_t vl) {
@@ -824,12 +850,12 @@ void test_sf_vc_i_se_u8m1(size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u8m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8m2.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 8, i32 1, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8m2.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 8, i64 1, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_i_se_u8m2(size_t vl) {
@@ -838,12 +864,12 @@ void test_sf_vc_i_se_u8m2(size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u8m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8m4.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 8, i32 2, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8m4.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 8, i64 2, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_i_se_u8m4(size_t vl) {
@@ -852,12 +878,12 @@ void test_sf_vc_i_se_u8m4(size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u8m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8m8.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 8, i32 3, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e8m8.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 8, i64 3, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_i_se_u8m8(size_t vl) {
@@ -866,12 +892,12 @@ void test_sf_vc_i_se_u8m8(size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16mf4.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 16, i32 6, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16mf4.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 16, i64 6, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_i_se_u16mf4(size_t vl) {
@@ -880,12 +906,12 @@ void test_sf_vc_i_se_u16mf4(size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16mf2.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 16, i32 7, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16mf2.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 16, i64 7, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_i_se_u16mf2(size_t vl) {
@@ -894,12 +920,12 @@ void test_sf_vc_i_se_u16mf2(size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16m1.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 16, i32 0, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16m1.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 16, i64 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_i_se_u16m1(size_t vl) {
@@ -908,12 +934,12 @@ void test_sf_vc_i_se_u16m1(size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16m2.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 16, i32 1, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16m2.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 16, i64 1, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_i_se_u16m2(size_t vl) {
@@ -922,12 +948,12 @@ void test_sf_vc_i_se_u16m2(size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16m4.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 16, i32 2, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16m4.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 16, i64 2, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_i_se_u16m4(size_t vl) {
@@ -936,12 +962,12 @@ void test_sf_vc_i_se_u16m4(size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u16m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16m8.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 16, i32 3, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e16m8.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 16, i64 3, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_i_se_u16m8(size_t vl) {
@@ -950,12 +976,12 @@ void test_sf_vc_i_se_u16m8(size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32mf2.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 32, i32 7, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32mf2.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 32, i64 7, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_i_se_u32mf2(size_t vl) {
@@ -964,12 +990,12 @@ void test_sf_vc_i_se_u32mf2(size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32m1.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 32, i32 0, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32m1.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 32, i64 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_i_se_u32m1(size_t vl) {
@@ -978,12 +1004,12 @@ void test_sf_vc_i_se_u32m1(size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32m2.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 32, i32 1, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32m2.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 32, i64 1, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_i_se_u32m2(size_t vl) {
@@ -992,12 +1018,12 @@ void test_sf_vc_i_se_u32m2(size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32m4.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 32, i32 2, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32m4.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 32, i64 2, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_i_se_u32m4(size_t vl) {
@@ -1006,12 +1032,12 @@ void test_sf_vc_i_se_u32m4(size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u32m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32m8.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 32, i32 3, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e32m8.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 32, i64 3, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_i_se_u32m8(size_t vl) {
@@ -1020,12 +1046,12 @@ void test_sf_vc_i_se_u32m8(size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u64m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e64m1.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 64, i32 0, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e64m1.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 64, i64 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_i_se_u64m1(size_t vl) {
@@ -1034,12 +1060,12 @@ void test_sf_vc_i_se_u64m1(size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u64m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e64m2.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 64, i32 1, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e64m2.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 64, i64 1, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_i_se_u64m2(size_t vl) {
@@ -1048,12 +1074,12 @@ void test_sf_vc_i_se_u64m2(size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u64m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e64m4.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 64, i32 2, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e64m4.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 64, i64 2, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_i_se_u64m4(size_t vl) {
@@ -1062,12 +1088,12 @@ void test_sf_vc_i_se_u64m4(size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u64m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.e64m8.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.i.se.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 64, i32 3, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.e64m8.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.i.se.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 64, i64 3, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_i_se_u64m8(size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/xsfvcp-index-out-of-range.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/xsfvcp-index-out-of-range.c
index a424af84e8ba45..f6ebf8589eeae6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/xsfvcp-index-out-of-range.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/xsfvcp-index-out-of-range.c
@@ -22,17 +22,17 @@
 // sf_vc_x_se
 
 void test_sf_vc_x_se_u8mf8_p27_26_not_constant(uint8_t rs1, size_t vl, int index) {
-  // expected-error@+1 {{argument to '__riscv_sf_vc_x_se_u8mf8' must be a constant integer}}
+  // expected-error@+1 {{argument to '__riscv_sf_vc_x_se' must be a constant integer}}
   __riscv_sf_vc_x_se_u8mf8(index, p24_20, p11_7, rs1, vl);
 }
 
 void test_sf_vc_x_se_u8mf8_24_20_not_constant(uint8_t rs1, size_t vl, int index) {
-  // expected-error@+1 {{argument to '__riscv_sf_vc_x_se_u8mf8' must be a constant integer}}
+  // expected-error@+1 {{argument to '__riscv_sf_vc_x_se' must be a constant integer}}
   __riscv_sf_vc_x_se_u8mf8(p27_26, index, p11_7, rs1, vl);
 }
 
 void test_sf_vc_x_se_u8mf8_11_7_not_constant(uint8_t rs1, size_t vl, int index) {
-  // expected-error@+1 {{argument to '__riscv_sf_vc_x_se_u8mf8' must be a constant integer}}
+  // expected-error@+1 {{argument to '__riscv_sf_vc_x_se' must be a constant integer}}
   __riscv_sf_vc_x_se_u8mf8(p27_26, p24_20, index, rs1, vl);
 }
 
@@ -54,22 +54,22 @@ void test_sf_vc_x_se_u8mf8_p11_7_overflow(uint8_t rs1, size_t vl) {
 // sf_vc_i_se
 
 void test_sf_vc_i_se_u8mf8_p27_26_not_constant(size_t vl, int index) {
-  // expected-error@+1 {{argument to '__riscv_sf_vc_i_se_u8mf8' must be a constant integer}}
+  // expected-error@+1 {{argument to '__riscv_sf_vc_i_se' must be a constant integer}}
   __riscv_sf_vc_i_se_u8mf8(index, p24_20, p11_7, simm5, vl);
 }
 
 void test_sf_vc_i_se_u8mf8_24_20_not_constant(size_t vl, int index) {
-  // expected-error@+1 {{argument to '__riscv_sf_vc_i_se_u8mf8' must be a constant integer}}
+  // expected-error@+1 {{argument to '__riscv_sf_vc_i_se' must be a constant integer}}
   __riscv_sf_vc_i_se_u8mf8(p27_26, index, p11_7, simm5, vl);
 }
 
 void test_sf_vc_i_se_u8mf8_11_7_not_constant(size_t vl, int index) {
-  // expected-error@+1 {{argument to '__riscv_sf_vc_i_se_u8mf8' must be a constant integer}}
+  // expected-error@+1 {{argument to '__riscv_sf_vc_i_se' must be a constant integer}}
   __riscv_sf_vc_i_se_u8mf8(p27_26, p24_20, index, simm5, vl);
 }
 
 void test_sf_vc_i_se_u8mf8_simm5_not_constant(size_t vl, int index) {
-  // expected-error@+1 {{argument to '__riscv_sf_vc_i_se_u8mf8' must be a constant integer}}
+  // expected-error@+1 {{argument to '__riscv_sf_vc_i_se' must be a constant integer}}
   __riscv_sf_vc_i_se_u8mf8(p27_26, p24_20, p11_7, index, vl);
 }
 
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td b/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
index 008e86869b8bb0..4279661473d850 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
@@ -18,24 +18,33 @@ class VCIXSuffix<string range> {
 }
 
 let TargetPrefix = "riscv" in {
-  // Output: (vector_out) or ()
+  // Output: (vector_out)
   // Input: (bit<27-26>, bit<24-20>, scalar_in, vl) or
-  //        (bit<27-26>, bit<24-20>, bit<11-7>, scalar_in, vl)
-  class RISCVSFCustomVC_X<bit HasDst, bit HasSE, bit ImmScalar>
-        : Intrinsic<!if(HasDst, [llvm_anyvector_ty], []),
-                    !listconcat(!if(HasDst, [llvm_anyint_ty, LLVMMatchType<1>],
-                                            [llvm_anyint_ty, LLVMMatchType<0>, LLVMMatchType<0>]),
-                                [llvm_any_ty, llvm_anyint_ty]),
+  class RISCVSFCustomVC_X<bit HasSE, bit ImmScalar>
+        : Intrinsic<[llvm_anyvector_ty],
+                    [llvm_anyint_ty, LLVMMatchType<1>, llvm_any_ty, llvm_anyint_ty],
                     !listconcat([IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>],    // bit<27-26> and bit<24-20>
-                                !if(HasDst, [], [ImmArg<ArgIndex<2>>]),                   // Vd or bit<11-7>
-                                !if(ImmScalar, !if(HasDst, [ImmArg<ArgIndex<2>>],
-                                                           [ImmArg<ArgIndex<3>>]), []),   // ScalarOperand
+                                !if(ImmScalar, [ImmArg<ArgIndex<2>>], []),                // ScalarOperand
                                 !if(HasSE, [IntrHasSideEffects], []))>,
           RISCVVIntrinsic {
     let ScalarOperand = !cond(ImmScalar: NoScalarOperand,
-                              HasDst: 2,
+                              true: 2);
+    let VLOperand = 3;
+  }
+  // Output: ()
+  // Input: (bit<27-26>, bit<24-20>, bit<11-7>, scalar_in, sew, log2lmul, vl)
+  class RISCVSFCustomVC_X_WO_Suffix<bit ImmScalar>
+        : Intrinsic<[],
+                    [llvm_anyint_ty, LLVMMatchType<0>, LLVMMatchType<0>,
+                     llvm_any_ty, LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
+                    !listconcat([IntrNoMem, IntrHasSideEffects, ImmArg<ArgIndex<0>>,
+                                 ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>,
+                                 ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>],    // bit<27-26>, bit<24-20>, bit<11-7>, sew, log2lmul
+                                !if(ImmScalar, [ImmArg<ArgIndex<3>>], []))>,   // ScalarOperand
+          RISCVVIntrinsic {
+    let ScalarOperand = !cond(ImmScalar: NoScalarOperand,
                               true: 3);
-    let VLOperand = !if(HasDst, 3, 4);
+    let VLOperand = 6;
   }
   // Output: (vector_out) or ()
   // Input: (bit<27-26>, vector_in, vector_in/scalar_in, vl) or
@@ -90,14 +99,9 @@ let TargetPrefix = "riscv" in {
   multiclass RISCVSFCustomVC_X<list<string> type> {
     foreach t = type in {
       defvar ImmScalar = !eq(t, "i");
-      defvar range = ["c", "s", "i", "l"];
-      foreach r = range in {
-        foreach s = VCIXSuffix<r>.suffix in {
-          def "int_riscv_sf_vc_" # t # "_se_" # s : RISCVSFCustomVC_X<HasDst=0, HasSE=1, ImmScalar=ImmScalar>;
-        }
-      }
-      def "int_riscv_sf_vc_v_" # t # "_se" : RISCVSFCustomVC_X<HasDst=1, HasSE=1, ImmScalar=ImmScalar>;
-      def "int_riscv_sf_vc_v_" # t         : RISCVSFCustomVC_X<HasDst=1, HasSE=0, ImmScalar=ImmScalar>;
+      def "int_riscv_sf_vc_" # t # "_se"   : RISCVSFCustomVC_X_WO_Suffix<ImmScalar=ImmScalar>;
+      def "int_riscv_sf_vc_v_" # t # "_se" : RISCVSFCustomVC_X<HasSE=1, ImmScalar=ImmScalar>;
+      def "int_riscv_sf_vc_v_" # t         : RISCVSFCustomVC_X<HasSE=0, ImmScalar=ImmScalar>;
     }
   }
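With the suffix-free class above, SEW and LMUL are no longer baked into the intrinsic name: the sew operand carries the element width in bits and the log2lmul operand carries the usual 3-bit vtype LMUL code (m1..m8 are 0..3, the fractional LMULs wrap to 5/6/7). A minimal sketch of that encoding, assuming only the standard RVV vtype layout; the helper below is invented for illustration and is not part of the patch.

#include <cstdint>

// Invented helper, for illustration only: derive the two extra immediate
// operands for a given element width and log2(LMUL). Fractional LMULs use
// negative log2 values, which wrap into the 3-bit vtype code: -3 -> 5 (mf8),
// -2 -> 6 (mf4), -1 -> 7 (mf2); m1..m8 stay 0..3.
struct VCIXExtraOps {
  uint64_t Sew;      // element width in bits: 8, 16, 32 or 64
  uint64_t LMulCode; // value passed as the log2lmul operand
};

constexpr VCIXExtraOps encodeVCIXExtraOps(uint64_t SewBits, int Log2LMul) {
  return {SewBits, static_cast<uint64_t>(Log2LMul) & 0x7};
}

static_assert(encodeVCIXExtraOps(8, -3).LMulCode == 5, "e8mf8 uses code 5");
static_assert(encodeVCIXExtraOps(64, 3).LMulCode == 3, "e64m8 uses code 3");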
 
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 4d6f74bc0285cb..5ae6dda3e5e28c 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -817,6 +817,66 @@ bool RISCVDAGToDAGISel::tryIndexedLoad(SDNode *Node) {
   return true;
 }
 
+void RISCVDAGToDAGISel::selectSF_VC_X_SE(SDNode *Node) {
+  if (!Subtarget->hasVInstructions())
+    return;
+
+  assert(Node->getOpcode() == ISD::INTRINSIC_VOID && "Unexpected opcode");
+
+  SDLoc DL(Node);
+  unsigned IntNo = Node->getConstantOperandVal(1);
+
+  assert((IntNo == Intrinsic::riscv_sf_vc_x_se ||
+          IntNo == Intrinsic::riscv_sf_vc_i_se) &&
+         "Unexpected VCIX intrinsic");
+
+  // imm, imm, imm, simm5/scalar, sew, log2lmul, vl
+  auto *SewSDNode = dyn_cast<ConstantSDNode>(Node->getOperand(6));
+  unsigned Log2SEW = Log2_32(SewSDNode->getZExtValue());
+  SDValue SEWOp =
+      CurDAG->getTargetConstant(Log2SEW, DL, Subtarget->getXLenVT());
+  SmallVector<SDValue, 8> Operands = {Node->getOperand(2), Node->getOperand(3),
+                                      Node->getOperand(4), Node->getOperand(5),
+                                      Node->getOperand(8), SEWOp,
+                                      Node->getOperand(0)};
+
+  unsigned Opcode;
+  auto *LMulSDNode = dyn_cast<ConstantSDNode>(Node->getOperand(7));
+  switch (LMulSDNode->getSExtValue()) {
+  case 5:
+    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF8
+                                                  : RISCV::PseudoVC_I_SE_MF8;
+    break;
+  case 6:
+    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF4
+                                                  : RISCV::PseudoVC_I_SE_MF4;
+    break;
+  case 7:
+    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF2
+                                                  : RISCV::PseudoVC_I_SE_MF2;
+    break;
+  case 0:
+    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M1
+                                                  : RISCV::PseudoVC_I_SE_M1;
+    break;
+  case 1:
+    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M2
+                                                  : RISCV::PseudoVC_I_SE_M2;
+    break;
+  case 2:
+    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M4
+                                                  : RISCV::PseudoVC_I_SE_M4;
+    break;
+  case 3:
+    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M8
+                                                  : RISCV::PseudoVC_I_SE_M8;
+    break;
+  }
+
+  ReplaceNode(Node, CurDAG->getMachineNode(
+                        Opcode, DL, Node->getSimpleValueType(0), Operands));
+}
+
 void RISCVDAGToDAGISel::Select(SDNode *Node) {
   // If we have a custom node, we have already selected.
   if (Node->isMachineOpcode()) {
@@ -1975,6 +2035,10 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       ReplaceNode(Node, Store);
       return;
     }
+    case Intrinsic::riscv_sf_vc_x_se:
+    case Intrinsic::riscv_sf_vc_i_se:
+      selectSF_VC_X_SE(Node);
+      return;
     }
     break;
   }
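For reference, the switch in selectSF_VC_X_SE keys the pseudo-instruction choice on the 3-bit LMUL code carried by the log2lmul operand, while the sew operand is narrowed to Log2SEW via Log2_32 before being appended to the machine operands. Purely as an illustration of the case values (not how the selector itself is written):

#include <array>
#include <string_view>

// Illustration only: the LMUL code -> pseudo suffix mapping implied by the
// switch in selectSF_VC_X_SE (code 4 is reserved in the vtype encoding).
constexpr std::array<std::string_view, 8> LMulSuffixForCode = {
    "M1", "M2", "M4", "M8", "", "MF8", "MF4", "MF2"};

// e.g. the e8mf8 tests pass log2lmul = 5 and sew = 8, so the selector picks
// PseudoVC_X_SE_MF8 / PseudoVC_I_SE_MF8 with Log2SEW = Log2_32(8) = 3.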
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
index 77e174135a599f..92f818b0dc4891 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -160,6 +160,8 @@ class RISCVDAGToDAGISel : public SelectionDAGISel {
 
   void selectVSETVLI(SDNode *Node);
 
+  void selectSF_VC_X_SE(SDNode *Node);
+
   // Return the RISC-V condition code that matches the given DAG integer
   // condition code. The CondCode must be one of those supported by the RISC-V
   // ISA (see translateSetCCForBranch).
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 6ab1c82a95b0c3..794945fac4deca 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -9009,94 +9009,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
         ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Ops,
         FixedIntrinsic->getMemoryVT(), FixedIntrinsic->getMemOperand());
   }
-  case Intrinsic::riscv_sf_vc_x_se_e8mf8:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_X_SE_E8MF8);
-  case Intrinsic::riscv_sf_vc_x_se_e8mf4:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_X_SE_E8MF4);
-  case Intrinsic::riscv_sf_vc_x_se_e8mf2:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_X_SE_E8MF2);
-  case Intrinsic::riscv_sf_vc_x_se_e8m1:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_X_SE_E8M1);
-  case Intrinsic::riscv_sf_vc_x_se_e8m2:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_X_SE_E8M2);
-  case Intrinsic::riscv_sf_vc_x_se_e8m4:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_X_SE_E8M4);
-  case Intrinsic::riscv_sf_vc_x_se_e8m8:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_X_SE_E8M8);
-  case Intrinsic::riscv_sf_vc_x_se_e16mf4:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_X_SE_E16MF4);
-  case Intrinsic::riscv_sf_vc_x_se_e16mf2:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_X_SE_E16MF2);
-  case Intrinsic::riscv_sf_vc_x_se_e16m1:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_X_SE_E16M1);
-  case Intrinsic::riscv_sf_vc_x_se_e16m2:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_X_SE_E16M2);
-  case Intrinsic::riscv_sf_vc_x_se_e16m4:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_X_SE_E16M4);
-  case Intrinsic::riscv_sf_vc_x_se_e16m8:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_X_SE_E16M8);
-  case Intrinsic::riscv_sf_vc_x_se_e32mf2:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_X_SE_E32MF2);
-  case Intrinsic::riscv_sf_vc_x_se_e32m1:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_X_SE_E32M1);
-  case Intrinsic::riscv_sf_vc_x_se_e32m2:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_X_SE_E32M2);
-  case Intrinsic::riscv_sf_vc_x_se_e32m4:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_X_SE_E32M4);
-  case Intrinsic::riscv_sf_vc_x_se_e32m8:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_X_SE_E32M8);
-  case Intrinsic::riscv_sf_vc_x_se_e64m1:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_X_SE_E64M1);
-  case Intrinsic::riscv_sf_vc_x_se_e64m2:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_X_SE_E64M2);
-  case Intrinsic::riscv_sf_vc_x_se_e64m4:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_X_SE_E64M4);
-  case Intrinsic::riscv_sf_vc_x_se_e64m8:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_X_SE_E64M8);
-  case Intrinsic::riscv_sf_vc_i_se_e8mf8:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_I_SE_E8MF8);
-  case Intrinsic::riscv_sf_vc_i_se_e8mf4:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_I_SE_E8MF4);
-  case Intrinsic::riscv_sf_vc_i_se_e8mf2:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_I_SE_E8MF2);
-  case Intrinsic::riscv_sf_vc_i_se_e8m1:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_I_SE_E8M1);
-  case Intrinsic::riscv_sf_vc_i_se_e8m2:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_I_SE_E8M2);
-  case Intrinsic::riscv_sf_vc_i_se_e8m4:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_I_SE_E8M4);
-  case Intrinsic::riscv_sf_vc_i_se_e8m8:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_I_SE_E8M8);
-  case Intrinsic::riscv_sf_vc_i_se_e16mf4:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_I_SE_E16MF4);
-  case Intrinsic::riscv_sf_vc_i_se_e16mf2:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_I_SE_E16MF2);
-  case Intrinsic::riscv_sf_vc_i_se_e16m1:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_I_SE_E16M1);
-  case Intrinsic::riscv_sf_vc_i_se_e16m2:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_I_SE_E16M2);
-  case Intrinsic::riscv_sf_vc_i_se_e16m4:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_I_SE_E16M4);
-  case Intrinsic::riscv_sf_vc_i_se_e16m8:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_I_SE_E16M8);
-  case Intrinsic::riscv_sf_vc_i_se_e32mf2:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_I_SE_E32MF2);
-  case Intrinsic::riscv_sf_vc_i_se_e32m1:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_I_SE_E32M1);
-  case Intrinsic::riscv_sf_vc_i_se_e32m2:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_I_SE_E32M2);
-  case Intrinsic::riscv_sf_vc_i_se_e32m4:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_I_SE_E32M4);
-  case Intrinsic::riscv_sf_vc_i_se_e32m8:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_I_SE_E32M8);
-  case Intrinsic::riscv_sf_vc_i_se_e64m1:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_I_SE_E64M1);
-  case Intrinsic::riscv_sf_vc_i_se_e64m2:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_I_SE_E64M2);
-  case Intrinsic::riscv_sf_vc_i_se_e64m4:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_I_SE_E64M4);
-  case Intrinsic::riscv_sf_vc_i_se_e64m8:
-    return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_I_SE_E64M8);
   case Intrinsic::riscv_sf_vc_xv_se:
     return getVCIXISDNodeVOID(Op, DAG, RISCVISD::SF_VC_XV_SE);
   case Intrinsic::riscv_sf_vc_iv_se:
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
index d22f98d693b1bf..b4130e3805a110 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
@@ -435,12 +435,6 @@ let Predicates = [HasVendorXSfvfnrclipxfqf] in {
 }
 
 // SDNode
-def SDT_SF_VC_X : SDTypeProfile<0, 5, [SDTCisSameAs<0, 1>,
-                                       SDTCisVT<0, XLenVT>,
-                                       SDTCisSameAs<0, 2>,
-                                       SDTCisSameAs<0, 3>,
-                                       SDTCisSameAs<0, 4>]>;
-
 def SDT_SF_VC_V_X : SDTypeProfile<1, 4, [SDTCisVec<0>,
                                          SDTCisVT<1, XLenVT>,
                                          SDTCisSameAs<1, 2>,
@@ -478,10 +472,6 @@ def SDT_SF_VC_V_XVW : SDTypeProfile<1, 5, [SDTCisVec<0>,
                                            SDTCisVec<3>,
                                            SDTCisSameAs<1, 5>]>;
 
-foreach vti = AllIntegerVectors in {
-  def sf_vc_x_e#vti.SEW#!tolower(vti.LMul.MX) : SDNode<"RISCVISD::SF_VC_X_SE_E"#vti.SEW#vti.LMul.MX, SDT_SF_VC_X, [SDNPHasChain]>;
-  def sf_vc_i_e#vti.SEW#!tolower(vti.LMul.MX) : SDNode<"RISCVISD::SF_VC_I_SE_E"#vti.SEW#vti.LMul.MX, SDT_SF_VC_X, [SDNPHasChain]>;
-}
 def sf_vc_v_x_se : SDNode<"RISCVISD::SF_VC_V_X_SE", SDT_SF_VC_V_X, [SDNPHasChain]>;
 def sf_vc_v_i_se : SDNode<"RISCVISD::SF_VC_V_I_SE", SDT_SF_VC_V_X, [SDNPHasChain]>;
 def sf_vc_vv_se : SDNode<"RISCVISD::SF_VC_VV_SE", SDT_SF_VC_XV, [SDNPHasChain]>;
@@ -646,10 +636,6 @@ class VPatVC_V_OP3<string intrinsic_name,
 
 multiclass VPatVC_X<string intrinsic_suffix, string instruction_suffix,
                     VTypeInfo vti, ValueType type, DAGOperand kind> {
-  def : VPatVC_OP4_ISD<!cast<SDPatternOperator>("sf_vc_" # intrinsic_suffix # "_e" # vti.SEW # !tolower(vti.LMul.MX)),
-                       "PseudoVC_" # instruction_suffix # "_SE_" # vti.LMul.MX,
-                       XLenVT, XLenVT, type, vti.Log2SEW,
-                       payload5, payload5, kind>;
   def : VPatVC_V_OP3_ISD<!cast<SDPatternOperator>("sf_vc_v_" # intrinsic_suffix # "_se"),
                          "PseudoVC_V_" # instruction_suffix # "_SE_" # vti.LMul.MX,
                          vti.Vector, XLenVT, type, vti.Log2SEW,
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-x.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-x.ll
index 7a20dd1d32b7e8..88803f7cd5d893 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-x.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-x.ll
@@ -11,11 +11,11 @@ define void @test_sf_vc_x_se_e8mf8(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e8mf8.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e8mf8.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen 8, iXLen 5, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e8mf8.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e8mf8.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e8mf4(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e8mf4:
@@ -24,11 +24,11 @@ define void @test_sf_vc_x_se_e8mf4(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e8mf4.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e8mf4.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen 8, iXLen 6, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e8mf4.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e8mf4.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e8mf2(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e8mf2:
@@ -37,11 +37,11 @@ define void @test_sf_vc_x_se_e8mf2(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e8mf2.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e8mf2.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen 8, iXLen 7, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e8mf2.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e8mf2.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e8m1(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e8m1:
@@ -50,11 +50,11 @@ define void @test_sf_vc_x_se_e8m1(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e8m1.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e8m1.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen 8, iXLen 0, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e8m1.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e8m1.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e8m2(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e8m2:
@@ -63,11 +63,11 @@ define void @test_sf_vc_x_se_e8m2(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e8m2.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e8m2.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen 8, iXLen 1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e8m2.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e8m2.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e8m4(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e8m4:
@@ -76,11 +76,11 @@ define void @test_sf_vc_x_se_e8m4(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e8m4.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e8m4.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen 8, iXLen 2, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e8m4.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e8m4.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e8m8(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e8m8:
@@ -89,11 +89,11 @@ define void @test_sf_vc_x_se_e8m8(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e8m8.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e8m8.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen 8, iXLen 3, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e8m8.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e8m8.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e16mf4(i16 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e16mf4:
@@ -102,11 +102,11 @@ define void @test_sf_vc_x_se_e16mf4(i16 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e16mf4.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e16mf4.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen 16, iXLen 6, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e16mf4.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e16mf4.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e16mf2(i16 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e16mf2:
@@ -115,11 +115,11 @@ define void @test_sf_vc_x_se_e16mf2(i16 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e16mf2.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e16mf2.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen 16, iXLen 7, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e16mf2.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e16mf2.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e16m1(i16 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e16m1:
@@ -128,11 +128,11 @@ define void @test_sf_vc_x_se_e16m1(i16 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e16m1.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e16m1.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen 16, iXLen 0, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e16m1.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e16m1.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e16m2(i16 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e16m2:
@@ -141,11 +141,11 @@ define void @test_sf_vc_x_se_e16m2(i16 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e16m2.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e16m2.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen 16, iXLen 1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e16m2.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e16m2.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e16m4(i16 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e16m4:
@@ -154,11 +154,11 @@ define void @test_sf_vc_x_se_e16m4(i16 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e16m4.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e16m4.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen 16, iXLen 2, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e16m4.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e16m4.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e16m8(i16 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e16m8:
@@ -167,11 +167,11 @@ define void @test_sf_vc_x_se_e16m8(i16 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e16m8.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e16m8.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen 16, iXLen 3, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e16m8.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e16m8.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e32mf2(i32 signext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e32mf2:
@@ -180,11 +180,11 @@ define void @test_sf_vc_x_se_e32mf2(i32 signext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e32mf2.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e32mf2.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen 32, iXLen 7, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e32mf2.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e32mf2.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e32m1(i32 signext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e32m1:
@@ -193,11 +193,11 @@ define void @test_sf_vc_x_se_e32m1(i32 signext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e32m1.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e32m1.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen 32, iXLen 0, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e32m1.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e32m1.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e32m2(i32 signext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e32m2:
@@ -206,11 +206,11 @@ define void @test_sf_vc_x_se_e32m2(i32 signext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e32m2.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e32m2.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen 32, iXLen 1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e32m2.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e32m2.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e32m4(i32 signext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e32m4:
@@ -219,11 +219,11 @@ define void @test_sf_vc_x_se_e32m4(i32 signext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e32m4.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e32m4.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen 32, iXLen 2, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e32m4.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e32m4.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e32m8(i32 signext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e32m8:
@@ -232,11 +232,11 @@ define void @test_sf_vc_x_se_e32m8(i32 signext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e32m8.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e32m8.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen 32, iXLen 3, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e32m8.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e32m8.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen)
 
 define <1 x i8> @test_sf_vc_v_x_se_e8mf8(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_v_x_se_e8mf8:
@@ -713,11 +713,11 @@ define void @test_sf_vc_i_se_e8mf8(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e8mf8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e8mf8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 8, iXLen 5, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e8mf8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e8mf8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e8mf4(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e8mf4:
@@ -726,11 +726,11 @@ define void @test_sf_vc_i_se_e8mf4(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e8mf4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e8mf4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 8, iXLen 6, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e8mf4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e8mf4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e8mf2(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e8mf2:
@@ -739,11 +739,11 @@ define void @test_sf_vc_i_se_e8mf2(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e8mf2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e8mf2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 8, iXLen 7, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e8mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e8mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e8m1(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e8m1:
@@ -752,11 +752,11 @@ define void @test_sf_vc_i_se_e8m1(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e8m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e8m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 8, iXLen 0, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e8m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e8m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e8m2(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e8m2:
@@ -765,11 +765,11 @@ define void @test_sf_vc_i_se_e8m2(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e8m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e8m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 8, iXLen 1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e8m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e8m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e8m4(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e8m4:
@@ -778,11 +778,11 @@ define void @test_sf_vc_i_se_e8m4(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e8m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e8m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 8, iXLen 2, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e8m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e8m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e8m8(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e8m8:
@@ -791,11 +791,11 @@ define void @test_sf_vc_i_se_e8m8(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e8m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e8m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 8, iXLen 3, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e8m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e8m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e16mf4(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e16mf4:
@@ -804,11 +804,11 @@ define void @test_sf_vc_i_se_e16mf4(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e16mf4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e16mf4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 16, iXLen 6, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e16mf4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e16mf4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e16mf2(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e16mf2:
@@ -817,11 +817,11 @@ define void @test_sf_vc_i_se_e16mf2(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e16mf2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e16mf2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 16, iXLen 7, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e16mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e16mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e16m1(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e16m1:
@@ -830,11 +830,11 @@ define void @test_sf_vc_i_se_e16m1(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e16m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e16m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 16, iXLen 0, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e16m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e16m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e16m2(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e16m2:
@@ -843,11 +843,11 @@ define void @test_sf_vc_i_se_e16m2(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e16m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e16m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 16, iXLen 1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e16m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e16m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e16m4(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e16m4:
@@ -856,11 +856,11 @@ define void @test_sf_vc_i_se_e16m4(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e16m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e16m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 16, iXLen 2, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e16m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e16m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e16m8(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e16m8:
@@ -869,11 +869,11 @@ define void @test_sf_vc_i_se_e16m8(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e16m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e16m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 16, iXLen 3, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e16m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e16m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e32mf2(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e32mf2:
@@ -882,11 +882,11 @@ define void @test_sf_vc_i_se_e32mf2(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e32mf2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e32mf2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 32, iXLen 7, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e32mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e32mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e32m1(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e32m1:
@@ -895,11 +895,11 @@ define void @test_sf_vc_i_se_e32m1(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e32m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e32m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 32, iXLen 0, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e32m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e32m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e32m2(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e32m2:
@@ -908,11 +908,11 @@ define void @test_sf_vc_i_se_e32m2(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e32m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e32m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 32, iXLen 1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e32m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e32m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e32m4(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e32m4:
@@ -921,11 +921,11 @@ define void @test_sf_vc_i_se_e32m4(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e32m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e32m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 32, iXLen 2, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e32m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e32m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e32m8(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e32m8:
@@ -934,11 +934,11 @@ define void @test_sf_vc_i_se_e32m8(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e32m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e32m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 32, iXLen 3, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e32m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e32m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e64m1(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e64m1:
@@ -947,11 +947,11 @@ define void @test_sf_vc_i_se_e64m1(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e64m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e64m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 64, iXLen 0, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e64m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e64m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e64m2(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e64m2:
@@ -960,11 +960,11 @@ define void @test_sf_vc_i_se_e64m2(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e64m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e64m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 64, iXLen 1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e64m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e64m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e64m4(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e64m4:
@@ -973,11 +973,11 @@ define void @test_sf_vc_i_se_e64m4(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e64m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e64m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 64, iXLen 2, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e64m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e64m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e64m8(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e64m8:
@@ -986,11 +986,11 @@ define void @test_sf_vc_i_se_e64m8(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e64m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e64m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 64, iXLen 3, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e64m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e64m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define <1 x i8> @test_sf_vc_v_i_se_e8mf8(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_v_i_se_e8mf8:
diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll
index e9e7fa077b52bc..37899e4a80e927 100644
--- a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll
@@ -11,11 +11,11 @@ define void @test_sf_vc_x_se_e8mf8(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e8mf8.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e8mf8.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen 8, iXLen 5, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e8mf8.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e8mf8.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e8mf4(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e8mf4:
@@ -24,11 +24,11 @@ define void @test_sf_vc_x_se_e8mf4(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e8mf4.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e8mf4.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen 8, iXLen 6, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e8mf4.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e8mf4.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e8mf2(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e8mf2:
@@ -37,11 +37,11 @@ define void @test_sf_vc_x_se_e8mf2(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e8mf2.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e8mf2.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen 8, iXLen 7, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e8mf2.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e8mf2.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e8m1(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e8m1:
@@ -50,11 +50,11 @@ define void @test_sf_vc_x_se_e8m1(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e8m1.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e8m1.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen 8, iXLen 0, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e8m1.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e8m1.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e8m2(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e8m2:
@@ -63,11 +63,11 @@ define void @test_sf_vc_x_se_e8m2(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e8m2.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e8m2.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen 8, iXLen 1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e8m2.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e8m2.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e8m4(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e8m4:
@@ -76,11 +76,11 @@ define void @test_sf_vc_x_se_e8m4(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e8m4.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e8m4.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen 8, iXLen 2, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e8m4.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e8m4.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e8m8(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e8m8:
@@ -89,11 +89,11 @@ define void @test_sf_vc_x_se_e8m8(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e8m8.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e8m8.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen 8, iXLen 3, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e8m8.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e8m8.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e16mf4(i16 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e16mf4:
@@ -102,11 +102,11 @@ define void @test_sf_vc_x_se_e16mf4(i16 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e16mf4.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e16mf4.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen 16, iXLen 6, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e16mf4.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e16mf4.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e16mf2(i16 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e16mf2:
@@ -115,11 +115,11 @@ define void @test_sf_vc_x_se_e16mf2(i16 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e16mf2.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e16mf2.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen 16, iXLen 7, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e16mf2.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e16mf2.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e16m1(i16 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e16m1:
@@ -128,11 +128,11 @@ define void @test_sf_vc_x_se_e16m1(i16 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e16m1.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e16m1.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen 16, iXLen 0, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e16m1.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e16m1.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e16m2(i16 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e16m2:
@@ -141,11 +141,11 @@ define void @test_sf_vc_x_se_e16m2(i16 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e16m2.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e16m2.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen 16, iXLen 1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e16m2.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e16m2.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e16m4(i16 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e16m4:
@@ -154,11 +154,11 @@ define void @test_sf_vc_x_se_e16m4(i16 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e16m4.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e16m4.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen 16, iXLen 2, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e16m4.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e16m4.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e16m8(i16 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e16m8:
@@ -167,11 +167,11 @@ define void @test_sf_vc_x_se_e16m8(i16 zeroext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e16m8.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e16m8.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen 16, iXLen 3, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e16m8.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e16m8.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e32mf2(i32 signext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e32mf2:
@@ -180,11 +180,11 @@ define void @test_sf_vc_x_se_e32mf2(i32 signext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e32mf2.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e32mf2.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen 32, iXLen 7, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e32mf2.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e32mf2.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e32m1(i32 signext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e32m1:
@@ -193,11 +193,11 @@ define void @test_sf_vc_x_se_e32m1(i32 signext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e32m1.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e32m1.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen 32, iXLen 0, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e32m1.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e32m1.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e32m2(i32 signext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e32m2:
@@ -206,11 +206,11 @@ define void @test_sf_vc_x_se_e32m2(i32 signext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e32m2.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e32m2.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen 32, iXLen 1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e32m2.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e32m2.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e32m4(i32 signext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e32m4:
@@ -219,11 +219,11 @@ define void @test_sf_vc_x_se_e32m4(i32 signext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e32m4.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e32m4.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen 32, iXLen 2, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e32m4.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e32m4.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_x_se_e32m8(i32 signext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_x_se_e32m8:
@@ -232,11 +232,11 @@ define void @test_sf_vc_x_se_e32m8(i32 signext %rs1, iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.x.se.e32m8.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.x.se.e32m8.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen 32, iXLen 3, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.x.se.e32m8.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen)
+declare void @llvm.riscv.sf.vc.x.se.e32m8.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen, iXLen, iXLen)
 
 define <vscale x 1 x i8> @test_sf_vc_v_x_se_e8mf8(i8 zeroext %rs1, iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_v_x_se_e8mf8:
@@ -713,11 +713,11 @@ define void @test_sf_vc_i_se_e8mf8(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e8mf8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e8mf8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 8, iXLen 5, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e8mf8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e8mf8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e8mf4(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e8mf4:
@@ -726,11 +726,11 @@ define void @test_sf_vc_i_se_e8mf4(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e8mf4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e8mf4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 8, iXLen 6, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e8mf4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e8mf4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e8mf2(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e8mf2:
@@ -739,11 +739,11 @@ define void @test_sf_vc_i_se_e8mf2(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e8mf2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e8mf2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 8, iXLen 7, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e8mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e8mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e8m1(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e8m1:
@@ -752,11 +752,11 @@ define void @test_sf_vc_i_se_e8m1(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e8m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e8m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 8, iXLen 0, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e8m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e8m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e8m2(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e8m2:
@@ -765,11 +765,11 @@ define void @test_sf_vc_i_se_e8m2(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e8m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e8m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 8, iXLen 1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e8m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e8m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e8m4(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e8m4:
@@ -778,11 +778,11 @@ define void @test_sf_vc_i_se_e8m4(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e8m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e8m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 8, iXLen 2, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e8m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e8m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e8m8(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e8m8:
@@ -791,11 +791,11 @@ define void @test_sf_vc_i_se_e8m8(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e8m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e8m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 8, iXLen 3, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e8m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e8m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e16mf4(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e16mf4:
@@ -804,11 +804,11 @@ define void @test_sf_vc_i_se_e16mf4(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e16mf4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e16mf4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 16, iXLen 6, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e16mf4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e16mf4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e16mf2(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e16mf2:
@@ -817,11 +817,11 @@ define void @test_sf_vc_i_se_e16mf2(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e16mf2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e16mf2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 16, iXLen 7, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e16mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e16mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e16m1(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e16m1:
@@ -830,11 +830,11 @@ define void @test_sf_vc_i_se_e16m1(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e16m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e16m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 16, iXLen 0, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e16m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e16m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e16m2(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e16m2:
@@ -843,11 +843,11 @@ define void @test_sf_vc_i_se_e16m2(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e16m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e16m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 16, iXLen 1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e16m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e16m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e16m4(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e16m4:
@@ -856,11 +856,11 @@ define void @test_sf_vc_i_se_e16m4(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e16m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e16m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 16, iXLen 2, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e16m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e16m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e16m8(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e16m8:
@@ -869,11 +869,11 @@ define void @test_sf_vc_i_se_e16m8(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e16m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e16m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 16, iXLen 3, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e16m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e16m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e32mf2(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e32mf2:
@@ -882,11 +882,11 @@ define void @test_sf_vc_i_se_e32mf2(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e32mf2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e32mf2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 32, iXLen 7, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e32mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e32mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e32m1(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e32m1:
@@ -895,11 +895,11 @@ define void @test_sf_vc_i_se_e32m1(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e32m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e32m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 32, iXLen 0, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e32m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e32m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e32m2(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e32m2:
@@ -908,11 +908,11 @@ define void @test_sf_vc_i_se_e32m2(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e32m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e32m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 32, iXLen 1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e32m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e32m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e32m4(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e32m4:
@@ -921,11 +921,11 @@ define void @test_sf_vc_i_se_e32m4(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e32m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e32m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 32, iXLen 2, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e32m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e32m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e32m8(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e32m8:
@@ -934,11 +934,11 @@ define void @test_sf_vc_i_se_e32m8(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e32m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e32m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 32, iXLen 3, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e32m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e32m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e64m1(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e64m1:
@@ -947,11 +947,11 @@ define void @test_sf_vc_i_se_e64m1(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e64m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e64m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 64, iXLen 0, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e64m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e64m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e64m2(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e64m2:
@@ -960,11 +960,11 @@ define void @test_sf_vc_i_se_e64m2(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e64m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e64m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 64, iXLen 1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e64m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e64m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e64m4(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e64m4:
@@ -973,11 +973,11 @@ define void @test_sf_vc_i_se_e64m4(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e64m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e64m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 64, iXLen 2, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e64m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e64m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define void @test_sf_vc_i_se_e64m8(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_i_se_e64m8:
@@ -986,11 +986,11 @@ define void @test_sf_vc_i_se_e64m8(iXLen %vl) {
 ; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.i.se.e64m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.i.se.e64m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen 64, iXLen 3, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.i.se.e64m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.i.se.e64m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen, iXLen, iXLen)
 
 define <vscale x 1 x i8> @test_sf_vc_v_i_se_e8mf8(iXLen %vl) {
 ; CHECK-LABEL: test_sf_vc_v_i_se_e8mf8:
