[clang] [RISCV] Convert all floating point vector type operands to integer vector type (PR #69559)

Brandon Wu via cfe-commits <cfe-commits at lists.llvm.org>
Wed Oct 18 21:19:20 PDT 2023


https://github.com/4vtomat updated https://github.com/llvm/llvm-project/pull/69559

From c61f6fb37e5d8ad0b7dfa68803f5a0161d542867 Mon Sep 17 00:00:00 2001
From: Brandon Wu <brandon.wu at sifive.com>
Date: Wed, 18 Oct 2023 21:16:07 -0700
Subject: [PATCH 1/2] [RISCV] Convert all floating point vector type operands
 to integer vector type

---
 .../clang/Basic/riscv_sifive_vector.td        |  32 +-
 .../non-overloaded/xsfvcp-xv-rv64.c           |  16 +-
 .../non-policy/non-overloaded/xsfvcp-xv.c     | 616 ++++++------
 .../non-overloaded/xsfvcp-xvv-rv64.c          |  24 +-
 .../non-policy/non-overloaded/xsfvcp-xvv.c    | 924 +++++++++---------
 .../non-policy/non-overloaded/xsfvcp-xvw.c    | 432 ++++----
 llvm/include/llvm/IR/IntrinsicsRISCVXsf.td    |   8 +-
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   | 110 ++-
 llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td    |  40 +-
 9 files changed, 1122 insertions(+), 1080 deletions(-)
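
For context, a minimal sketch of the kind of call the updated tests cover, and the
effect of this patch on the mangled intrinsic name. The builtin spelling, the
<sifive_vector.h> header, and the literal opcode 3 are taken from the existing
autogenerated tests as assumptions, not verified against this exact revision:

#include <stddef.h>
#include <sifive_vector.h>  // SiFive VCIX intrinsics (header assumed, as in the clang tests)

// Same shape as the autogenerated test functions below; the leading
// immediate (3) supplies the custom opcode bits.
vuint8mf8_t demo(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
  // Before this patch: @llvm.riscv.sf.vc.v.vv.se.nxv1i8.i64.nxv1i8.i64
  // After this patch:  @llvm.riscv.sf.vc.v.vv.se.nxv1i8.i64.nxv1i8.nxv1i8.i64
  // i.e. every vector operand type is now part of the overloaded name, which
  // is what allows floating-point vector operands to be lowered through the
  // equivalent integer vector types.
  return __riscv_sf_vc_v_vv_se_u8mf8(3, vs2, vs1, vl);
}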

diff --git a/clang/include/clang/Basic/riscv_sifive_vector.td b/clang/include/clang/Basic/riscv_sifive_vector.td
index 6583a7eb7b2e59b..b3d201b4e4fc0d5 100644
--- a/clang/include/clang/Basic/riscv_sifive_vector.td
+++ b/clang/include/clang/Basic/riscv_sifive_vector.td
@@ -78,28 +78,28 @@ let SupportOverloading = false in {
   defm sf_vc_iv    : RVVVCIXBuiltinSet<["csi", "l"], "0KzKzUvKz",  [0, 2, 3],  UseGPR=0>;
   defm sf_vc_vv    : RVVVCIXBuiltinSet<["csi", "l"], "0KzKzUvUv",  [0, 2, 3],  UseGPR=0>;
   defm sf_vc_fv    : RVVVCIXBuiltinSet<["si",  "l"], "0KzKzUvFe",  [0, 2, 3],  UseGPR=0>;
-  defm sf_vc_xvv   : RVVVCIXBuiltinSet<["csi", "l"], "0KzUvUvUe",  [0, 1, 3],  UseGPR=1>;
-  defm sf_vc_ivv   : RVVVCIXBuiltinSet<["csi", "l"], "0KzUvUvKz",  [0, 1, 3],  UseGPR=0>;
-  defm sf_vc_vvv   : RVVVCIXBuiltinSet<["csi", "l"], "0KzUvUvUv",  [0, 1, 3],  UseGPR=0>;
-  defm sf_vc_fvv   : RVVVCIXBuiltinSet<["si",  "l"], "0KzUvUvFe",  [0, 1, 3],  UseGPR=0>;
+  defm sf_vc_xvv   : RVVVCIXBuiltinSet<["csi", "l"], "0KzUvUvUe",  [0, 1, 2, 3],  UseGPR=1>;
+  defm sf_vc_ivv   : RVVVCIXBuiltinSet<["csi", "l"], "0KzUvUvKz",  [0, 1, 2, 3],  UseGPR=0>;
+  defm sf_vc_vvv   : RVVVCIXBuiltinSet<["csi", "l"], "0KzUvUvUv",  [0, 1, 2, 3],  UseGPR=0>;
+  defm sf_vc_fvv   : RVVVCIXBuiltinSet<["si",  "l"], "0KzUvUvFe",  [0, 1, 2, 3],  UseGPR=0>;
   defm sf_vc_v_x   : RVVVCIXBuiltinSet<["csi", "l"], "UvKzKzUe",   [-1, 1, 2], UseGPR=1>;
   defm sf_vc_v_i   : RVVVCIXBuiltinSet<["csi", "l"], "UvKzKzKz",   [-1, 1, 2], UseGPR=0>;
-  defm sf_vc_v_xv  : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUe",   [-1, 0, 2], UseGPR=1>;
-  defm sf_vc_v_iv  : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvKz",   [-1, 0, 2], UseGPR=0>;
-  defm sf_vc_v_vv  : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUv",   [-1, 0, 2], UseGPR=0>;
-  defm sf_vc_v_fv  : RVVVCIXBuiltinSet<["si",  "l"], "UvKzUvFe",   [-1, 0, 2], UseGPR=0>;
-  defm sf_vc_v_xvv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUvUe", [-1, 0, 3], UseGPR=1>;
-  defm sf_vc_v_ivv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUvKz", [-1, 0, 3], UseGPR=0>;
-  defm sf_vc_v_vvv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUvUv", [-1, 0, 3], UseGPR=0>;
-  defm sf_vc_v_fvv : RVVVCIXBuiltinSet<["si",  "l"], "UvKzUvUvFe", [-1, 0, 3], UseGPR=0>;
+  defm sf_vc_v_xv  : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUe",   [-1, 0, 1, 2], UseGPR=1>;
+  defm sf_vc_v_iv  : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvKz",   [-1, 0, 1, 2], UseGPR=0>;
+  defm sf_vc_v_vv  : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUv",   [-1, 0, 1, 2], UseGPR=0>;
+  defm sf_vc_v_fv  : RVVVCIXBuiltinSet<["si",  "l"], "UvKzUvFe",   [-1, 0, 1, 2], UseGPR=0>;
+  defm sf_vc_v_xvv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUvUe", [-1, 0, 1, 2, 3], UseGPR=1>;
+  defm sf_vc_v_ivv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUvKz", [-1, 0, 1, 2, 3], UseGPR=0>;
+  defm sf_vc_v_vvv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUvUv", [-1, 0, 1, 2, 3], UseGPR=0>;
+  defm sf_vc_v_fvv : RVVVCIXBuiltinSet<["si",  "l"], "UvKzUvUvFe", [-1, 0, 1, 2, 3], UseGPR=0>;
   let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
     defm sf_vc_xvw   : RVVVCIXBuiltinSet<["csi"], "0KzUwUvUe",  [0, 1, 2, 3],  UseGPR=1>;
     defm sf_vc_ivw   : RVVVCIXBuiltinSet<["csi"], "0KzUwUvKz",  [0, 1, 2, 3],  UseGPR=0>;
     defm sf_vc_vvw   : RVVVCIXBuiltinSet<["csi"], "0KzUwUvUv",  [0, 1, 2, 3],  UseGPR=0>;
     defm sf_vc_fvw   : RVVVCIXBuiltinSet<["si"],  "0KzUwUvFe",  [0, 1, 2, 3],  UseGPR=0>;
-    defm sf_vc_v_xvw : RVVVCIXBuiltinSet<["csi"], "UwKzUwUvUe", [-1, 0, 2, 3], UseGPR=1>;
-    defm sf_vc_v_ivw : RVVVCIXBuiltinSet<["csi"], "UwKzUwUvKz", [-1, 0, 2, 3], UseGPR=0>;
-    defm sf_vc_v_vvw : RVVVCIXBuiltinSet<["csi"], "UwKzUwUvUv", [-1, 0, 2, 3], UseGPR=0>;
-    defm sf_vc_v_fvw : RVVVCIXBuiltinSet<["si"],  "UwKzUwUvFe", [-1, 0, 2, 3], UseGPR=0>;
+    defm sf_vc_v_xvw : RVVVCIXBuiltinSet<["csi"], "UwKzUwUvUe", [-1, 0, 1, 2, 3], UseGPR=1>;
+    defm sf_vc_v_ivw : RVVVCIXBuiltinSet<["csi"], "UwKzUwUvKz", [-1, 0, 1, 2, 3], UseGPR=0>;
+    defm sf_vc_v_vvw : RVVVCIXBuiltinSet<["csi"], "UwKzUwUvUv", [-1, 0, 1, 2, 3], UseGPR=0>;
+    defm sf_vc_v_fvw : RVVVCIXBuiltinSet<["si"],  "UwKzUwUvFe", [-1, 0, 1, 2, 3], UseGPR=0>;
   }
 }
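
Reading the hunk above: the integer list in each RVVVCIXBuiltinSet entry appears to
enumerate the operand positions whose types are encoded in the overloaded intrinsic
name (with -1 denoting the result type), so extending each list to cover every
vector operand is what produces the longer mangled names in the test updates below.
This is an interpretation of the diff rather than documented TableGen semantics.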
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xv-rv64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xv-rv64.c
index 39704189ae45ed0..9e305049f22b1c6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xv-rv64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xv-rv64.c
@@ -45,7 +45,7 @@ void test_sf_vc_xv_se_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv1i64.i64.i64.i64(i64 3, <vscale x 1 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv1i64.i64.nxv1i64.i64.i64(i64 3, <vscale x 1 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_xv_se_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
@@ -54,7 +54,7 @@ vuint64m1_t test_sf_vc_v_xv_se_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv2i64.i64.i64.i64(i64 3, <vscale x 2 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv2i64.i64.nxv2i64.i64.i64(i64 3, <vscale x 2 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_xv_se_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
@@ -63,7 +63,7 @@ vuint64m2_t test_sf_vc_v_xv_se_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv4i64.i64.i64.i64(i64 3, <vscale x 4 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv4i64.i64.nxv4i64.i64.i64(i64 3, <vscale x 4 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_xv_se_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
@@ -72,7 +72,7 @@ vuint64m4_t test_sf_vc_v_xv_se_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv8i64.i64.i64.i64(i64 3, <vscale x 8 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv8i64.i64.nxv8i64.i64.i64(i64 3, <vscale x 8 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_xv_se_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
@@ -81,7 +81,7 @@ vuint64m8_t test_sf_vc_v_xv_se_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xv.nxv1i64.i64.i64.i64(i64 3, <vscale x 1 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xv.nxv1i64.i64.nxv1i64.i64.i64(i64 3, <vscale x 1 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_xv_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
@@ -90,7 +90,7 @@ vuint64m1_t test_sf_vc_v_xv_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xv.nxv2i64.i64.i64.i64(i64 3, <vscale x 2 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xv.nxv2i64.i64.nxv2i64.i64.i64(i64 3, <vscale x 2 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_xv_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
@@ -99,7 +99,7 @@ vuint64m2_t test_sf_vc_v_xv_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xv.nxv4i64.i64.i64.i64(i64 3, <vscale x 4 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xv.nxv4i64.i64.nxv4i64.i64.i64(i64 3, <vscale x 4 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_xv_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
@@ -108,7 +108,7 @@ vuint64m4_t test_sf_vc_v_xv_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xv.nxv8i64.i64.i64.i64(i64 3, <vscale x 8 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xv.nxv8i64.i64.nxv8i64.i64.i64(i64 3, <vscale x 8 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_xv_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xv.c
index f37748ac3020b99..a9e1f30b73b2833 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xv.c
@@ -320,12 +320,12 @@ void test_sf_vc_vv_se_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u8mf8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv1i8.i32.nxv1i8.i32(i32 3, <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv1i8.i32.nxv1i8.nxv1i8.i32(i32 3, <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv1i8.i64.nxv1i8.i64(i64 3, <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv1i8.i64.nxv1i8.nxv1i8.i64(i64 3, <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_sf_vc_v_vv_se_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
@@ -334,12 +334,12 @@ vuint8mf8_t test_sf_vc_v_vv_se_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u8mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv2i8.i32.nxv2i8.i32(i32 3, <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv2i8.i32.nxv2i8.nxv2i8.i32(i32 3, <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv2i8.i64.nxv2i8.i64(i64 3, <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv2i8.i64.nxv2i8.nxv2i8.i64(i64 3, <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_sf_vc_v_vv_se_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
@@ -348,12 +348,12 @@ vuint8mf4_t test_sf_vc_v_vv_se_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u8mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv4i8.i32.nxv4i8.i32(i32 3, <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv4i8.i32.nxv4i8.nxv4i8.i32(i32 3, <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv4i8.i64.nxv4i8.i64(i64 3, <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv4i8.i64.nxv4i8.nxv4i8.i64(i64 3, <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_sf_vc_v_vv_se_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
@@ -362,12 +362,12 @@ vuint8mf2_t test_sf_vc_v_vv_se_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv8i8.i32.nxv8i8.i32(i32 3, <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv8i8.i32.nxv8i8.nxv8i8.i32(i32 3, <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv8i8.i64.nxv8i8.i64(i64 3, <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv8i8.i64.nxv8i8.nxv8i8.i64(i64 3, <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_sf_vc_v_vv_se_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
@@ -376,12 +376,12 @@ vuint8m1_t test_sf_vc_v_vv_se_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u8m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv16i8.i32.nxv16i8.i32(i32 3, <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv16i8.i32.nxv16i8.nxv16i8.i32(i32 3, <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv16i8.i64.nxv16i8.i64(i64 3, <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv16i8.i64.nxv16i8.nxv16i8.i64(i64 3, <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_sf_vc_v_vv_se_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
@@ -390,12 +390,12 @@ vuint8m2_t test_sf_vc_v_vv_se_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u8m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv32i8.i32.nxv32i8.i32(i32 3, <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv32i8.i32.nxv32i8.nxv32i8.i32(i32 3, <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv32i8.i64.nxv32i8.i64(i64 3, <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv32i8.i64.nxv32i8.nxv32i8.i64(i64 3, <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_sf_vc_v_vv_se_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
@@ -404,12 +404,12 @@ vuint8m4_t test_sf_vc_v_vv_se_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u8m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv64i8.i32.nxv64i8.i32(i32 3, <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv64i8.i32.nxv64i8.nxv64i8.i32(i32 3, <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv64i8.i64.nxv64i8.i64(i64 3, <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv64i8.i64.nxv64i8.nxv64i8.i64(i64 3, <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_sf_vc_v_vv_se_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
@@ -418,12 +418,12 @@ vuint8m8_t test_sf_vc_v_vv_se_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv1i16.i32.nxv1i16.i32(i32 3, <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv1i16.i32.nxv1i16.nxv1i16.i32(i32 3, <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv1i16.i64.nxv1i16.i64(i64 3, <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv1i16.i64.nxv1i16.nxv1i16.i64(i64 3, <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_sf_vc_v_vv_se_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
@@ -432,12 +432,12 @@ vuint16mf4_t test_sf_vc_v_vv_se_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv2i16.i32.nxv2i16.i32(i32 3, <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv2i16.i32.nxv2i16.nxv2i16.i32(i32 3, <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv2i16.i64.nxv2i16.i64(i64 3, <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv2i16.i64.nxv2i16.nxv2i16.i64(i64 3, <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_sf_vc_v_vv_se_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
@@ -446,12 +446,12 @@ vuint16mf2_t test_sf_vc_v_vv_se_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv4i16.i32.nxv4i16.i32(i32 3, <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv4i16.i32.nxv4i16.nxv4i16.i32(i32 3, <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv4i16.i64.nxv4i16.i64(i64 3, <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv4i16.i64.nxv4i16.nxv4i16.i64(i64 3, <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_sf_vc_v_vv_se_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
@@ -460,12 +460,12 @@ vuint16m1_t test_sf_vc_v_vv_se_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv8i16.i32.nxv8i16.i32(i32 3, <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv8i16.i32.nxv8i16.nxv8i16.i32(i32 3, <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv8i16.i64.nxv8i16.i64(i64 3, <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv8i16.i64.nxv8i16.nxv8i16.i64(i64 3, <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_sf_vc_v_vv_se_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
@@ -474,12 +474,12 @@ vuint16m2_t test_sf_vc_v_vv_se_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv16i16.i32.nxv16i16.i32(i32 3, <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv16i16.i32.nxv16i16.nxv16i16.i32(i32 3, <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv16i16.i64.nxv16i16.i64(i64 3, <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv16i16.i64.nxv16i16.nxv16i16.i64(i64 3, <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_sf_vc_v_vv_se_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
@@ -488,12 +488,12 @@ vuint16m4_t test_sf_vc_v_vv_se_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u16m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv32i16.i32.nxv32i16.i32(i32 3, <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv32i16.i32.nxv32i16.nxv32i16.i32(i32 3, <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv32i16.i64.nxv32i16.i64(i64 3, <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv32i16.i64.nxv32i16.nxv32i16.i64(i64 3, <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_sf_vc_v_vv_se_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
@@ -502,12 +502,12 @@ vuint16m8_t test_sf_vc_v_vv_se_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv1i32.i32.nxv1i32.i32(i32 3, <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv1i32.i32.nxv1i32.nxv1i32.i32(i32 3, <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv1i32.i64.nxv1i32.i64(i64 3, <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv1i32.i64.nxv1i32.nxv1i32.i64(i64 3, <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_sf_vc_v_vv_se_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
@@ -516,12 +516,12 @@ vuint32mf2_t test_sf_vc_v_vv_se_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv2i32.i32.nxv2i32.i32(i32 3, <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv2i32.i32.nxv2i32.nxv2i32.i32(i32 3, <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv2i32.i64.nxv2i32.i64(i64 3, <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv2i32.i64.nxv2i32.nxv2i32.i64(i64 3, <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_sf_vc_v_vv_se_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
@@ -530,12 +530,12 @@ vuint32m1_t test_sf_vc_v_vv_se_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv4i32.i32.nxv4i32.i32(i32 3, <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv4i32.i32.nxv4i32.nxv4i32.i32(i32 3, <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv4i32.i64.nxv4i32.i64(i64 3, <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv4i32.i64.nxv4i32.nxv4i32.i64(i64 3, <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_sf_vc_v_vv_se_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
@@ -544,12 +544,12 @@ vuint32m2_t test_sf_vc_v_vv_se_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv8i32.i32.nxv8i32.i32(i32 3, <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv8i32.i32.nxv8i32.nxv8i32.i32(i32 3, <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv8i32.i64.nxv8i32.i64(i64 3, <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv8i32.i64.nxv8i32.nxv8i32.i64(i64 3, <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_sf_vc_v_vv_se_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
@@ -558,12 +558,12 @@ vuint32m4_t test_sf_vc_v_vv_se_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u32m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv16i32.i32.nxv16i32.i32(i32 3, <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv16i32.i32.nxv16i32.nxv16i32.i32(i32 3, <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv16i32.i64.nxv16i32.i64(i64 3, <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv16i32.i64.nxv16i32.nxv16i32.i64(i64 3, <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_sf_vc_v_vv_se_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
@@ -572,12 +572,12 @@ vuint32m8_t test_sf_vc_v_vv_se_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u64m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv1i64.i32.nxv1i64.i32(i32 3, <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv1i64.i32.nxv1i64.nxv1i64.i32(i32 3, <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv1i64.i64.nxv1i64.i64(i64 3, <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv1i64.i64.nxv1i64.nxv1i64.i64(i64 3, <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_vv_se_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
@@ -586,12 +586,12 @@ vuint64m1_t test_sf_vc_v_vv_se_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u64m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv2i64.i32.nxv2i64.i32(i32 3, <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv2i64.i32.nxv2i64.nxv2i64.i32(i32 3, <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv2i64.i64.nxv2i64.i64(i64 3, <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv2i64.i64.nxv2i64.nxv2i64.i64(i64 3, <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_vv_se_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
@@ -600,12 +600,12 @@ vuint64m2_t test_sf_vc_v_vv_se_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u64m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv4i64.i32.nxv4i64.i32(i32 3, <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv4i64.i32.nxv4i64.nxv4i64.i32(i32 3, <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv4i64.i64.nxv4i64.i64(i64 3, <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv4i64.i64.nxv4i64.nxv4i64.i64(i64 3, <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_vv_se_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
@@ -614,12 +614,12 @@ vuint64m4_t test_sf_vc_v_vv_se_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u64m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv8i64.i32.nxv8i64.i32(i32 3, <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv8i64.i32.nxv8i64.nxv8i64.i32(i32 3, <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv8i64.i64.nxv8i64.i64(i64 3, <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv8i64.i64.nxv8i64.nxv8i64.i64(i64 3, <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_vv_se_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
@@ -628,12 +628,12 @@ vuint64m8_t test_sf_vc_v_vv_se_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u8mf8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.nxv1i8.i32.nxv1i8.i32(i32 3, <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.nxv1i8.i32.nxv1i8.nxv1i8.i32(i32 3, <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.nxv1i8.i64.nxv1i8.i64(i64 3, <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.nxv1i8.i64.nxv1i8.nxv1i8.i64(i64 3, <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_sf_vc_v_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
@@ -642,12 +642,12 @@ vuint8mf8_t test_sf_vc_v_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u8mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vv.nxv2i8.i32.nxv2i8.i32(i32 3, <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vv.nxv2i8.i32.nxv2i8.nxv2i8.i32(i32 3, <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vv.nxv2i8.i64.nxv2i8.i64(i64 3, <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vv.nxv2i8.i64.nxv2i8.nxv2i8.i64(i64 3, <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_sf_vc_v_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
@@ -656,12 +656,12 @@ vuint8mf4_t test_sf_vc_v_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u8mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vv.nxv4i8.i32.nxv4i8.i32(i32 3, <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vv.nxv4i8.i32.nxv4i8.nxv4i8.i32(i32 3, <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vv.nxv4i8.i64.nxv4i8.i64(i64 3, <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vv.nxv4i8.i64.nxv4i8.nxv4i8.i64(i64 3, <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_sf_vc_v_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
@@ -670,12 +670,12 @@ vuint8mf2_t test_sf_vc_v_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vv.nxv8i8.i32.nxv8i8.i32(i32 3, <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vv.nxv8i8.i32.nxv8i8.nxv8i8.i32(i32 3, <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vv.nxv8i8.i64.nxv8i8.i64(i64 3, <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vv.nxv8i8.i64.nxv8i8.nxv8i8.i64(i64 3, <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_sf_vc_v_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
@@ -684,12 +684,12 @@ vuint8m1_t test_sf_vc_v_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u8m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vv.nxv16i8.i32.nxv16i8.i32(i32 3, <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vv.nxv16i8.i32.nxv16i8.nxv16i8.i32(i32 3, <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vv.nxv16i8.i64.nxv16i8.i64(i64 3, <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vv.nxv16i8.i64.nxv16i8.nxv16i8.i64(i64 3, <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_sf_vc_v_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
@@ -698,12 +698,12 @@ vuint8m2_t test_sf_vc_v_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u8m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vv.nxv32i8.i32.nxv32i8.i32(i32 3, <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vv.nxv32i8.i32.nxv32i8.nxv32i8.i32(i32 3, <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vv.nxv32i8.i64.nxv32i8.i64(i64 3, <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vv.nxv32i8.i64.nxv32i8.nxv32i8.i64(i64 3, <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_sf_vc_v_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
@@ -712,12 +712,12 @@ vuint8m4_t test_sf_vc_v_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u8m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vv.nxv64i8.i32.nxv64i8.i32(i32 3, <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vv.nxv64i8.i32.nxv64i8.nxv64i8.i32(i32 3, <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vv.nxv64i8.i64.nxv64i8.i64(i64 3, <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vv.nxv64i8.i64.nxv64i8.nxv64i8.i64(i64 3, <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_sf_vc_v_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
@@ -726,12 +726,12 @@ vuint8m8_t test_sf_vc_v_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vv.nxv1i16.i32.nxv1i16.i32(i32 3, <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vv.nxv1i16.i32.nxv1i16.nxv1i16.i32(i32 3, <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vv.nxv1i16.i64.nxv1i16.i64(i64 3, <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vv.nxv1i16.i64.nxv1i16.nxv1i16.i64(i64 3, <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_sf_vc_v_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
@@ -740,12 +740,12 @@ vuint16mf4_t test_sf_vc_v_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t v
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vv.nxv2i16.i32.nxv2i16.i32(i32 3, <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vv.nxv2i16.i32.nxv2i16.nxv2i16.i32(i32 3, <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vv.nxv2i16.i64.nxv2i16.i64(i64 3, <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vv.nxv2i16.i64.nxv2i16.nxv2i16.i64(i64 3, <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_sf_vc_v_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
@@ -754,12 +754,12 @@ vuint16mf2_t test_sf_vc_v_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t v
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vv.nxv4i16.i32.nxv4i16.i32(i32 3, <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vv.nxv4i16.i32.nxv4i16.nxv4i16.i32(i32 3, <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vv.nxv4i16.i64.nxv4i16.i64(i64 3, <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vv.nxv4i16.i64.nxv4i16.nxv4i16.i64(i64 3, <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_sf_vc_v_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
@@ -768,12 +768,12 @@ vuint16m1_t test_sf_vc_v_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vv.nxv8i16.i32.nxv8i16.i32(i32 3, <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vv.nxv8i16.i32.nxv8i16.nxv8i16.i32(i32 3, <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vv.nxv8i16.i64.nxv8i16.i64(i64 3, <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vv.nxv8i16.i64.nxv8i16.nxv8i16.i64(i64 3, <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_sf_vc_v_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
@@ -782,12 +782,12 @@ vuint16m2_t test_sf_vc_v_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vv.nxv16i16.i32.nxv16i16.i32(i32 3, <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vv.nxv16i16.i32.nxv16i16.nxv16i16.i32(i32 3, <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vv.nxv16i16.i64.nxv16i16.i64(i64 3, <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vv.nxv16i16.i64.nxv16i16.nxv16i16.i64(i64 3, <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_sf_vc_v_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
@@ -796,12 +796,12 @@ vuint16m4_t test_sf_vc_v_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u16m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vv.nxv32i16.i32.nxv32i16.i32(i32 3, <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vv.nxv32i16.i32.nxv32i16.nxv32i16.i32(i32 3, <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vv.nxv32i16.i64.nxv32i16.i64(i64 3, <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vv.nxv32i16.i64.nxv32i16.nxv32i16.i64(i64 3, <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_sf_vc_v_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
@@ -810,12 +810,12 @@ vuint16m8_t test_sf_vc_v_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vv.nxv1i32.i32.nxv1i32.i32(i32 3, <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vv.nxv1i32.i32.nxv1i32.nxv1i32.i32(i32 3, <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vv.nxv1i32.i64.nxv1i32.i64(i64 3, <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vv.nxv1i32.i64.nxv1i32.nxv1i32.i64(i64 3, <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_sf_vc_v_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
@@ -824,12 +824,12 @@ vuint32mf2_t test_sf_vc_v_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t v
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vv.nxv2i32.i32.nxv2i32.i32(i32 3, <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vv.nxv2i32.i32.nxv2i32.nxv2i32.i32(i32 3, <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vv.nxv2i32.i64.nxv2i32.i64(i64 3, <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vv.nxv2i32.i64.nxv2i32.nxv2i32.i64(i64 3, <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_sf_vc_v_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
@@ -838,12 +838,12 @@ vuint32m1_t test_sf_vc_v_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.nxv4i32.i32.nxv4i32.i32(i32 3, <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.nxv4i32.i32.nxv4i32.nxv4i32.i32(i32 3, <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.nxv4i32.i64.nxv4i32.i64(i64 3, <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.nxv4i32.i64.nxv4i32.nxv4i32.i64(i64 3, <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_sf_vc_v_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
@@ -852,12 +852,12 @@ vuint32m2_t test_sf_vc_v_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vv.nxv8i32.i32.nxv8i32.i32(i32 3, <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vv.nxv8i32.i32.nxv8i32.nxv8i32.i32(i32 3, <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vv.nxv8i32.i64.nxv8i32.i64(i64 3, <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vv.nxv8i32.i64.nxv8i32.nxv8i32.i64(i64 3, <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_sf_vc_v_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
@@ -866,12 +866,12 @@ vuint32m4_t test_sf_vc_v_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u32m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vv.nxv16i32.i32.nxv16i32.i32(i32 3, <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vv.nxv16i32.i32.nxv16i32.nxv16i32.i32(i32 3, <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vv.nxv16i32.i64.nxv16i32.i64(i64 3, <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vv.nxv16i32.i64.nxv16i32.nxv16i32.i64(i64 3, <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_sf_vc_v_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
@@ -880,12 +880,12 @@ vuint32m8_t test_sf_vc_v_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u64m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vv.nxv1i64.i32.nxv1i64.i32(i32 3, <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vv.nxv1i64.i32.nxv1i64.nxv1i64.i32(i32 3, <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vv.nxv1i64.i64.nxv1i64.i64(i64 3, <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vv.nxv1i64.i64.nxv1i64.nxv1i64.i64(i64 3, <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
@@ -894,12 +894,12 @@ vuint64m1_t test_sf_vc_v_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u64m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vv.nxv2i64.i32.nxv2i64.i32(i32 3, <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vv.nxv2i64.i32.nxv2i64.nxv2i64.i32(i32 3, <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vv.nxv2i64.i64.nxv2i64.i64(i64 3, <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vv.nxv2i64.i64.nxv2i64.nxv2i64.i64(i64 3, <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
@@ -908,12 +908,12 @@ vuint64m2_t test_sf_vc_v_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u64m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vv.nxv4i64.i32.nxv4i64.i32(i32 3, <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vv.nxv4i64.i32.nxv4i64.nxv4i64.i32(i32 3, <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vv.nxv4i64.i64.nxv4i64.i64(i64 3, <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vv.nxv4i64.i64.nxv4i64.nxv4i64.i64(i64 3, <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
@@ -922,12 +922,12 @@ vuint64m4_t test_sf_vc_v_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u64m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.nxv8i64.i32.nxv8i64.i32(i32 3, <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.nxv8i64.i32.nxv8i64.nxv8i64.i32(i32 3, <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.nxv8i64.i64.nxv8i64.i64(i64 3, <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.nxv8i64.i64.nxv8i64.nxv8i64.i64(i64 3, <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
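The .xv hunks below follow the same pattern for the vector-scalar form: the scalar rs1 keeps its plain integer mangling (i8, i16, i32), and only the vs2 vector type is new in the name. A hedged sketch under the same assumptions as above:

// Sketch only: vector-scalar (.xv) side-effect form.
//   before (RV64): @llvm.riscv.sf.vc.v.xv.se.nxv1i8.i64.i8.i64
//   after  (RV64): @llvm.riscv.sf.vc.v.xv.se.nxv1i8.i64.nxv1i8.i8.i64
vuint8mf8_t test_sf_vc_v_xv_se_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xv_se_u8mf8(p27_26, vs2, rs1, vl);
}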
@@ -1188,12 +1188,12 @@ void test_sf_vc_xv_se_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u8mf8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv1i8.i32.i8.i32(i32 3, <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv1i8.i32.nxv1i8.i8.i32(i32 3, <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv1i8.i64.i8.i64(i64 3, <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv1i8.i64.nxv1i8.i8.i64(i64 3, <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_sf_vc_v_xv_se_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
@@ -1202,12 +1202,12 @@ vuint8mf8_t test_sf_vc_v_xv_se_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u8mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv2i8.i32.i8.i32(i32 3, <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv2i8.i32.nxv2i8.i8.i32(i32 3, <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv2i8.i64.i8.i64(i64 3, <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv2i8.i64.nxv2i8.i8.i64(i64 3, <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_sf_vc_v_xv_se_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
@@ -1216,12 +1216,12 @@ vuint8mf4_t test_sf_vc_v_xv_se_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u8mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv4i8.i32.i8.i32(i32 3, <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv4i8.i32.nxv4i8.i8.i32(i32 3, <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv4i8.i64.i8.i64(i64 3, <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv4i8.i64.nxv4i8.i8.i64(i64 3, <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_sf_vc_v_xv_se_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
@@ -1230,12 +1230,12 @@ vuint8mf2_t test_sf_vc_v_xv_se_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv8i8.i32.i8.i32(i32 3, <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv8i8.i32.nxv8i8.i8.i32(i32 3, <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv8i8.i64.i8.i64(i64 3, <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv8i8.i64.nxv8i8.i8.i64(i64 3, <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_sf_vc_v_xv_se_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
@@ -1244,12 +1244,12 @@ vuint8m1_t test_sf_vc_v_xv_se_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u8m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv16i8.i32.i8.i32(i32 3, <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv16i8.i32.nxv16i8.i8.i32(i32 3, <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv16i8.i64.i8.i64(i64 3, <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv16i8.i64.nxv16i8.i8.i64(i64 3, <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_sf_vc_v_xv_se_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
@@ -1258,12 +1258,12 @@ vuint8m2_t test_sf_vc_v_xv_se_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u8m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv32i8.i32.i8.i32(i32 3, <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv32i8.i32.nxv32i8.i8.i32(i32 3, <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv32i8.i64.i8.i64(i64 3, <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv32i8.i64.nxv32i8.i8.i64(i64 3, <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_sf_vc_v_xv_se_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
@@ -1272,12 +1272,12 @@ vuint8m4_t test_sf_vc_v_xv_se_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u8m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv64i8.i32.i8.i32(i32 3, <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv64i8.i32.nxv64i8.i8.i32(i32 3, <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv64i8.i64.i8.i64(i64 3, <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv64i8.i64.nxv64i8.i8.i64(i64 3, <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_sf_vc_v_xv_se_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
@@ -1286,12 +1286,12 @@ vuint8m8_t test_sf_vc_v_xv_se_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv1i16.i32.i16.i32(i32 3, <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv1i16.i32.nxv1i16.i16.i32(i32 3, <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv1i16.i64.i16.i64(i64 3, <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv1i16.i64.nxv1i16.i16.i64(i64 3, <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_sf_vc_v_xv_se_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
@@ -1300,12 +1300,12 @@ vuint16mf4_t test_sf_vc_v_xv_se_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv2i16.i32.i16.i32(i32 3, <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv2i16.i32.nxv2i16.i16.i32(i32 3, <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv2i16.i64.i16.i64(i64 3, <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv2i16.i64.nxv2i16.i16.i64(i64 3, <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_sf_vc_v_xv_se_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
@@ -1314,12 +1314,12 @@ vuint16mf2_t test_sf_vc_v_xv_se_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv4i16.i32.i16.i32(i32 3, <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv4i16.i32.nxv4i16.i16.i32(i32 3, <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv4i16.i64.i16.i64(i64 3, <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv4i16.i64.nxv4i16.i16.i64(i64 3, <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_sf_vc_v_xv_se_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
@@ -1328,12 +1328,12 @@ vuint16m1_t test_sf_vc_v_xv_se_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv8i16.i32.i16.i32(i32 3, <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv8i16.i32.nxv8i16.i16.i32(i32 3, <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv8i16.i64.i16.i64(i64 3, <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv8i16.i64.nxv8i16.i16.i64(i64 3, <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_sf_vc_v_xv_se_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
@@ -1342,12 +1342,12 @@ vuint16m2_t test_sf_vc_v_xv_se_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv16i16.i32.i16.i32(i32 3, <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv16i16.i32.nxv16i16.i16.i32(i32 3, <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv16i16.i64.i16.i64(i64 3, <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv16i16.i64.nxv16i16.i16.i64(i64 3, <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_sf_vc_v_xv_se_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
@@ -1356,12 +1356,12 @@ vuint16m4_t test_sf_vc_v_xv_se_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u16m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv32i16.i32.i16.i32(i32 3, <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv32i16.i32.nxv32i16.i16.i32(i32 3, <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv32i16.i64.i16.i64(i64 3, <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv32i16.i64.nxv32i16.i16.i64(i64 3, <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_sf_vc_v_xv_se_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
@@ -1370,12 +1370,12 @@ vuint16m8_t test_sf_vc_v_xv_se_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv1i32.i32.i32.i32(i32 3, <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv1i32.i32.nxv1i32.i32.i32(i32 3, <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv1i32.i64.i32.i64(i64 3, <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv1i32.i64.nxv1i32.i32.i64(i64 3, <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_sf_vc_v_xv_se_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
@@ -1384,12 +1384,12 @@ vuint32mf2_t test_sf_vc_v_xv_se_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv2i32.i32.i32.i32(i32 3, <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv2i32.i32.nxv2i32.i32.i32(i32 3, <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv2i32.i64.i32.i64(i64 3, <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv2i32.i64.nxv2i32.i32.i64(i64 3, <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_sf_vc_v_xv_se_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
@@ -1398,12 +1398,12 @@ vuint32m1_t test_sf_vc_v_xv_se_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i32.i32.i32(i32 3, <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i32.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i64.i32.i64(i64 3, <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i64.nxv4i32.i32.i64(i64 3, <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_sf_vc_v_xv_se_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
@@ -1412,12 +1412,12 @@ vuint32m2_t test_sf_vc_v_xv_se_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv8i32.i32.i32.i32(i32 3, <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv8i32.i32.nxv8i32.i32.i32(i32 3, <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv8i32.i64.i32.i64(i64 3, <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv8i32.i64.nxv8i32.i32.i64(i64 3, <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_sf_vc_v_xv_se_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
@@ -1426,12 +1426,12 @@ vuint32m4_t test_sf_vc_v_xv_se_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u32m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv16i32.i32.i32.i32(i32 3, <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv16i32.i32.nxv16i32.i32.i32(i32 3, <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv16i32.i64.i32.i64(i64 3, <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv16i32.i64.nxv16i32.i32.i64(i64 3, <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_sf_vc_v_xv_se_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
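From here the file switches from the .se ("side effect") builtins to the plain ones. The only difference in the expected callee is the .se infix; the new vs2 mangling is identical in both, as the names taken from the hunks above and below show:

// .se variant:   @llvm.riscv.sf.vc.v.xv.se.nxv1i8.i64.nxv1i8.i8.i64
// plain variant: @llvm.riscv.sf.vc.v.xv.nxv1i8.i64.nxv1i8.i8.i64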
@@ -1440,12 +1440,12 @@ vuint32m8_t test_sf_vc_v_xv_se_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u8mf8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xv.nxv1i8.i32.i8.i32(i32 3, <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xv.nxv1i8.i32.nxv1i8.i8.i32(i32 3, <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xv.nxv1i8.i64.i8.i64(i64 3, <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xv.nxv1i8.i64.nxv1i8.i8.i64(i64 3, <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_sf_vc_v_xv_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
@@ -1454,12 +1454,12 @@ vuint8mf8_t test_sf_vc_v_xv_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u8mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xv.nxv2i8.i32.i8.i32(i32 3, <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xv.nxv2i8.i32.nxv2i8.i8.i32(i32 3, <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xv.nxv2i8.i64.i8.i64(i64 3, <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xv.nxv2i8.i64.nxv2i8.i8.i64(i64 3, <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_sf_vc_v_xv_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
@@ -1468,12 +1468,12 @@ vuint8mf4_t test_sf_vc_v_xv_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u8mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xv.nxv4i8.i32.i8.i32(i32 3, <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xv.nxv4i8.i32.nxv4i8.i8.i32(i32 3, <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xv.nxv4i8.i64.i8.i64(i64 3, <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xv.nxv4i8.i64.nxv4i8.i8.i64(i64 3, <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_sf_vc_v_xv_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
@@ -1482,12 +1482,12 @@ vuint8mf2_t test_sf_vc_v_xv_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xv.nxv8i8.i32.i8.i32(i32 3, <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xv.nxv8i8.i32.nxv8i8.i8.i32(i32 3, <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xv.nxv8i8.i64.i8.i64(i64 3, <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xv.nxv8i8.i64.nxv8i8.i8.i64(i64 3, <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_sf_vc_v_xv_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
@@ -1496,12 +1496,12 @@ vuint8m1_t test_sf_vc_v_xv_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u8m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xv.nxv16i8.i32.i8.i32(i32 3, <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xv.nxv16i8.i32.nxv16i8.i8.i32(i32 3, <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xv.nxv16i8.i64.i8.i64(i64 3, <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xv.nxv16i8.i64.nxv16i8.i8.i64(i64 3, <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_sf_vc_v_xv_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
@@ -1510,12 +1510,12 @@ vuint8m2_t test_sf_vc_v_xv_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u8m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xv.nxv32i8.i32.i8.i32(i32 3, <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xv.nxv32i8.i32.nxv32i8.i8.i32(i32 3, <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xv.nxv32i8.i64.i8.i64(i64 3, <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xv.nxv32i8.i64.nxv32i8.i8.i64(i64 3, <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_sf_vc_v_xv_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
@@ -1524,12 +1524,12 @@ vuint8m4_t test_sf_vc_v_xv_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u8m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xv.nxv64i8.i32.i8.i32(i32 3, <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xv.nxv64i8.i32.nxv64i8.i8.i32(i32 3, <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xv.nxv64i8.i64.i8.i64(i64 3, <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xv.nxv64i8.i64.nxv64i8.i8.i64(i64 3, <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_sf_vc_v_xv_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
@@ -1538,12 +1538,12 @@ vuint8m8_t test_sf_vc_v_xv_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xv.nxv1i16.i32.i16.i32(i32 3, <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xv.nxv1i16.i32.nxv1i16.i16.i32(i32 3, <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xv.nxv1i16.i64.i16.i64(i64 3, <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xv.nxv1i16.i64.nxv1i16.i16.i64(i64 3, <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_sf_vc_v_xv_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
@@ -1552,12 +1552,12 @@ vuint16mf4_t test_sf_vc_v_xv_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xv.nxv2i16.i32.i16.i32(i32 3, <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xv.nxv2i16.i32.nxv2i16.i16.i32(i32 3, <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xv.nxv2i16.i64.i16.i64(i64 3, <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xv.nxv2i16.i64.nxv2i16.i16.i64(i64 3, <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_sf_vc_v_xv_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
@@ -1566,12 +1566,12 @@ vuint16mf2_t test_sf_vc_v_xv_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xv.nxv4i16.i32.i16.i32(i32 3, <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xv.nxv4i16.i32.nxv4i16.i16.i32(i32 3, <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xv.nxv4i16.i64.i16.i64(i64 3, <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xv.nxv4i16.i64.nxv4i16.i16.i64(i64 3, <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_sf_vc_v_xv_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
@@ -1580,12 +1580,12 @@ vuint16m1_t test_sf_vc_v_xv_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xv.nxv8i16.i32.i16.i32(i32 3, <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xv.nxv8i16.i32.nxv8i16.i16.i32(i32 3, <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xv.nxv8i16.i64.i16.i64(i64 3, <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xv.nxv8i16.i64.nxv8i16.i16.i64(i64 3, <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_sf_vc_v_xv_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
@@ -1594,12 +1594,12 @@ vuint16m2_t test_sf_vc_v_xv_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xv.nxv16i16.i32.i16.i32(i32 3, <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xv.nxv16i16.i32.nxv16i16.i16.i32(i32 3, <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xv.nxv16i16.i64.i16.i64(i64 3, <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xv.nxv16i16.i64.nxv16i16.i16.i64(i64 3, <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_sf_vc_v_xv_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
@@ -1608,12 +1608,12 @@ vuint16m4_t test_sf_vc_v_xv_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u16m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xv.nxv32i16.i32.i16.i32(i32 3, <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xv.nxv32i16.i32.nxv32i16.i16.i32(i32 3, <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xv.nxv32i16.i64.i16.i64(i64 3, <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xv.nxv32i16.i64.nxv32i16.i16.i64(i64 3, <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_sf_vc_v_xv_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
@@ -1622,12 +1622,12 @@ vuint16m8_t test_sf_vc_v_xv_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xv.nxv1i32.i32.i32.i32(i32 3, <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xv.nxv1i32.i32.nxv1i32.i32.i32(i32 3, <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xv.nxv1i32.i64.i32.i64(i64 3, <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xv.nxv1i32.i64.nxv1i32.i32.i64(i64 3, <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_sf_vc_v_xv_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
@@ -1636,12 +1636,12 @@ vuint32mf2_t test_sf_vc_v_xv_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xv.nxv2i32.i32.i32.i32(i32 3, <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xv.nxv2i32.i32.nxv2i32.i32.i32(i32 3, <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xv.nxv2i32.i64.i32.i64(i64 3, <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xv.nxv2i32.i64.nxv2i32.i32.i64(i64 3, <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_sf_vc_v_xv_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
@@ -1650,12 +1650,12 @@ vuint32m1_t test_sf_vc_v_xv_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.nxv4i32.i32.i32.i32(i32 3, <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.nxv4i32.i32.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.nxv4i32.i64.i32.i64(i64 3, <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.nxv4i32.i64.nxv4i32.i32.i64(i64 3, <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_sf_vc_v_xv_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
@@ -1664,12 +1664,12 @@ vuint32m2_t test_sf_vc_v_xv_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xv.nxv8i32.i32.i32.i32(i32 3, <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xv.nxv8i32.i32.nxv8i32.i32.i32(i32 3, <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xv.nxv8i32.i64.i32.i64(i64 3, <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xv.nxv8i32.i64.nxv8i32.i32.i64(i64 3, <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_sf_vc_v_xv_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
@@ -1678,12 +1678,12 @@ vuint32m4_t test_sf_vc_v_xv_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u32m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xv.nxv16i32.i32.i32.i32(i32 3, <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xv.nxv16i32.i32.nxv16i32.i32.i32(i32 3, <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xv.nxv16i32.i64.i32.i64(i64 3, <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xv.nxv16i32.i64.nxv16i32.i32.i64(i64 3, <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_sf_vc_v_xv_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
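The .iv hunks that close out this section cover the vector-immediate form. The 5-bit immediate is materialized as an XLen-sized constant, which is why it mangles (and prints) as i32 on RV32 but i64 on RV64, alongside the newly mangled vs2 type. A hedged sketch, assuming the simm5 spelling of the immediate macro in this test file:

// Sketch only: vector-immediate (.iv) side-effect form.
//   before (RV64): @llvm.riscv.sf.vc.v.iv.se.nxv1i8.i64.i64.i64
//   after  (RV64): @llvm.riscv.sf.vc.v.iv.se.nxv1i8.i64.nxv1i8.i64.i64
#define simm5 (10) // appears as the literal 10 in the CHECK lines

vuint8mf8_t test_sf_vc_v_iv_se_u8mf8(vuint8mf8_t vs2, size_t vl) {
  return __riscv_sf_vc_v_iv_se_u8mf8(p27_26, vs2, simm5, vl);
}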
@@ -2000,12 +2000,12 @@ void test_sf_vc_iv_se_u64m8(vuint64m8_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u8mf8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv1i8.i32.i32.i32(i32 3, <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv1i8.i32.nxv1i8.i32.i32(i32 3, <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv1i8.i64.i64.i64(i64 3, <vscale x 1 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv1i8.i64.nxv1i8.i64.i64(i64 3, <vscale x 1 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_sf_vc_v_iv_se_u8mf8(vuint8mf8_t vs2, size_t vl) {
@@ -2014,12 +2014,12 @@ vuint8mf8_t test_sf_vc_v_iv_se_u8mf8(vuint8mf8_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u8mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv2i8.i32.i32.i32(i32 3, <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv2i8.i32.nxv2i8.i32.i32(i32 3, <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv2i8.i64.i64.i64(i64 3, <vscale x 2 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv2i8.i64.nxv2i8.i64.i64(i64 3, <vscale x 2 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_sf_vc_v_iv_se_u8mf4(vuint8mf4_t vs2, size_t vl) {
@@ -2028,12 +2028,12 @@ vuint8mf4_t test_sf_vc_v_iv_se_u8mf4(vuint8mf4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u8mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv4i8.i32.i32.i32(i32 3, <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv4i8.i32.nxv4i8.i32.i32(i32 3, <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv4i8.i64.i64.i64(i64 3, <vscale x 4 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv4i8.i64.nxv4i8.i64.i64(i64 3, <vscale x 4 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_sf_vc_v_iv_se_u8mf2(vuint8mf2_t vs2, size_t vl) {
@@ -2042,12 +2042,12 @@ vuint8mf2_t test_sf_vc_v_iv_se_u8mf2(vuint8mf2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv8i8.i32.i32.i32(i32 3, <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv8i8.i32.nxv8i8.i32.i32(i32 3, <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv8i8.i64.i64.i64(i64 3, <vscale x 8 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv8i8.i64.nxv8i8.i64.i64(i64 3, <vscale x 8 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_sf_vc_v_iv_se_u8m1(vuint8m1_t vs2, size_t vl) {
@@ -2056,12 +2056,12 @@ vuint8m1_t test_sf_vc_v_iv_se_u8m1(vuint8m1_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u8m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv16i8.i32.i32.i32(i32 3, <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv16i8.i32.nxv16i8.i32.i32(i32 3, <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv16i8.i64.i64.i64(i64 3, <vscale x 16 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv16i8.i64.nxv16i8.i64.i64(i64 3, <vscale x 16 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_sf_vc_v_iv_se_u8m2(vuint8m2_t vs2, size_t vl) {
@@ -2070,12 +2070,12 @@ vuint8m2_t test_sf_vc_v_iv_se_u8m2(vuint8m2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u8m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv32i8.i32.i32.i32(i32 3, <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv32i8.i32.nxv32i8.i32.i32(i32 3, <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv32i8.i64.i64.i64(i64 3, <vscale x 32 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv32i8.i64.nxv32i8.i64.i64(i64 3, <vscale x 32 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_sf_vc_v_iv_se_u8m4(vuint8m4_t vs2, size_t vl) {
@@ -2084,12 +2084,12 @@ vuint8m4_t test_sf_vc_v_iv_se_u8m4(vuint8m4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u8m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv64i8.i32.i32.i32(i32 3, <vscale x 64 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv64i8.i32.nxv64i8.i32.i32(i32 3, <vscale x 64 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv64i8.i64.i64.i64(i64 3, <vscale x 64 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv64i8.i64.nxv64i8.i64.i64(i64 3, <vscale x 64 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_sf_vc_v_iv_se_u8m8(vuint8m8_t vs2, size_t vl) {
@@ -2098,12 +2098,12 @@ vuint8m8_t test_sf_vc_v_iv_se_u8m8(vuint8m8_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv1i16.i32.i32.i32(i32 3, <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv1i16.i32.nxv1i16.i32.i32(i32 3, <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv1i16.i64.i64.i64(i64 3, <vscale x 1 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv1i16.i64.nxv1i16.i64.i64(i64 3, <vscale x 1 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_sf_vc_v_iv_se_u16mf4(vuint16mf4_t vs2, size_t vl) {
@@ -2112,12 +2112,12 @@ vuint16mf4_t test_sf_vc_v_iv_se_u16mf4(vuint16mf4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv2i16.i32.i32.i32(i32 3, <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv2i16.i32.nxv2i16.i32.i32(i32 3, <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv2i16.i64.i64.i64(i64 3, <vscale x 2 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv2i16.i64.nxv2i16.i64.i64(i64 3, <vscale x 2 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_sf_vc_v_iv_se_u16mf2(vuint16mf2_t vs2, size_t vl) {
@@ -2126,12 +2126,12 @@ vuint16mf2_t test_sf_vc_v_iv_se_u16mf2(vuint16mf2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv4i16.i32.i32.i32(i32 3, <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv4i16.i32.nxv4i16.i32.i32(i32 3, <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv4i16.i64.i64.i64(i64 3, <vscale x 4 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv4i16.i64.nxv4i16.i64.i64(i64 3, <vscale x 4 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_sf_vc_v_iv_se_u16m1(vuint16m1_t vs2, size_t vl) {
@@ -2140,12 +2140,12 @@ vuint16m1_t test_sf_vc_v_iv_se_u16m1(vuint16m1_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv8i16.i32.i32.i32(i32 3, <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv8i16.i32.nxv8i16.i32.i32(i32 3, <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv8i16.i64.i64.i64(i64 3, <vscale x 8 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv8i16.i64.nxv8i16.i64.i64(i64 3, <vscale x 8 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_sf_vc_v_iv_se_u16m2(vuint16m2_t vs2, size_t vl) {
@@ -2154,12 +2154,12 @@ vuint16m2_t test_sf_vc_v_iv_se_u16m2(vuint16m2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv16i16.i32.i32.i32(i32 3, <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv16i16.i32.nxv16i16.i32.i32(i32 3, <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv16i16.i64.i64.i64(i64 3, <vscale x 16 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv16i16.i64.nxv16i16.i64.i64(i64 3, <vscale x 16 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_sf_vc_v_iv_se_u16m4(vuint16m4_t vs2, size_t vl) {
@@ -2168,12 +2168,12 @@ vuint16m4_t test_sf_vc_v_iv_se_u16m4(vuint16m4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u16m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv32i16.i32.i32.i32(i32 3, <vscale x 32 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv32i16.i32.nxv32i16.i32.i32(i32 3, <vscale x 32 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv32i16.i64.i64.i64(i64 3, <vscale x 32 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv32i16.i64.nxv32i16.i64.i64(i64 3, <vscale x 32 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_sf_vc_v_iv_se_u16m8(vuint16m8_t vs2, size_t vl) {
@@ -2182,12 +2182,12 @@ vuint16m8_t test_sf_vc_v_iv_se_u16m8(vuint16m8_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv1i32.i32.i32.i32(i32 3, <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv1i32.i32.nxv1i32.i32.i32(i32 3, <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv1i32.i64.i64.i64(i64 3, <vscale x 1 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv1i32.i64.nxv1i32.i64.i64(i64 3, <vscale x 1 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_sf_vc_v_iv_se_u32mf2(vuint32mf2_t vs2, size_t vl) {
@@ -2196,12 +2196,12 @@ vuint32mf2_t test_sf_vc_v_iv_se_u32mf2(vuint32mf2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv2i32.i32.i32.i32(i32 3, <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv2i32.i32.nxv2i32.i32.i32(i32 3, <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv2i32.i64.i64.i64(i64 3, <vscale x 2 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv2i32.i64.nxv2i32.i64.i64(i64 3, <vscale x 2 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_sf_vc_v_iv_se_u32m1(vuint32m1_t vs2, size_t vl) {
@@ -2210,12 +2210,12 @@ vuint32m1_t test_sf_vc_v_iv_se_u32m1(vuint32m1_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv4i32.i32.i32.i32(i32 3, <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv4i32.i32.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv4i32.i64.i64.i64(i64 3, <vscale x 4 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv4i32.i64.nxv4i32.i64.i64(i64 3, <vscale x 4 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_sf_vc_v_iv_se_u32m2(vuint32m2_t vs2, size_t vl) {
@@ -2224,12 +2224,12 @@ vuint32m2_t test_sf_vc_v_iv_se_u32m2(vuint32m2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv8i32.i32.i32.i32(i32 3, <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv8i32.i32.nxv8i32.i32.i32(i32 3, <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv8i32.i64.i64.i64(i64 3, <vscale x 8 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv8i32.i64.nxv8i32.i64.i64(i64 3, <vscale x 8 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_sf_vc_v_iv_se_u32m4(vuint32m4_t vs2, size_t vl) {
@@ -2238,12 +2238,12 @@ vuint32m4_t test_sf_vc_v_iv_se_u32m4(vuint32m4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u32m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv16i32.i32.i32.i32(i32 3, <vscale x 16 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv16i32.i32.nxv16i32.i32.i32(i32 3, <vscale x 16 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv16i32.i64.i64.i64(i64 3, <vscale x 16 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv16i32.i64.nxv16i32.i64.i64(i64 3, <vscale x 16 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_sf_vc_v_iv_se_u32m8(vuint32m8_t vs2, size_t vl) {
@@ -2252,12 +2252,12 @@ vuint32m8_t test_sf_vc_v_iv_se_u32m8(vuint32m8_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u64m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv1i64.i32.i32.i32(i32 3, <vscale x 1 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv1i64.i32.nxv1i64.i32.i32(i32 3, <vscale x 1 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv1i64.i64.i64.i64(i64 3, <vscale x 1 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv1i64.i64.nxv1i64.i64.i64(i64 3, <vscale x 1 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_iv_se_u64m1(vuint64m1_t vs2, size_t vl) {
@@ -2266,12 +2266,12 @@ vuint64m1_t test_sf_vc_v_iv_se_u64m1(vuint64m1_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u64m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv2i64.i32.i32.i32(i32 3, <vscale x 2 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv2i64.i32.nxv2i64.i32.i32(i32 3, <vscale x 2 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv2i64.i64.i64.i64(i64 3, <vscale x 2 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv2i64.i64.nxv2i64.i64.i64(i64 3, <vscale x 2 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_iv_se_u64m2(vuint64m2_t vs2, size_t vl) {
@@ -2280,12 +2280,12 @@ vuint64m2_t test_sf_vc_v_iv_se_u64m2(vuint64m2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u64m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv4i64.i32.i32.i32(i32 3, <vscale x 4 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv4i64.i32.nxv4i64.i32.i32(i32 3, <vscale x 4 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv4i64.i64.i64.i64(i64 3, <vscale x 4 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv4i64.i64.nxv4i64.i64.i64(i64 3, <vscale x 4 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_iv_se_u64m4(vuint64m4_t vs2, size_t vl) {
@@ -2294,12 +2294,12 @@ vuint64m4_t test_sf_vc_v_iv_se_u64m4(vuint64m4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u64m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv8i64.i32.i32.i32(i32 3, <vscale x 8 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv8i64.i32.nxv8i64.i32.i32(i32 3, <vscale x 8 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv8i64.i64.i64.i64(i64 3, <vscale x 8 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv8i64.i64.nxv8i64.i64.i64(i64 3, <vscale x 8 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_iv_se_u64m8(vuint64m8_t vs2, size_t vl) {
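
For anyone skimming these generated checks: every hunk in this group follows the same shape, so a minimal sketch of the kind of C source that produces one of them may help. The builtin spelling, the -march string, and the literal opcode/immediate values (3 and 10, read off the IR operands above) are assumptions for illustration, not the verbatim test body:

  // Sketch only -- assumes a clang with the XSfvcp extension enabled,
  // e.g. -march=rv64gcv_xsfvcp.
  #include <riscv_vector.h>
  #include <sifive_vector.h>

  vuint8mf8_t demo_sf_vc_v_iv_se(vuint8mf8_t vs2, size_t vl) {
    // On RV64 this lowers to
    //   @llvm.riscv.sf.vc.v.iv.se.nxv1i8.i64.nxv1i8.i64.i64
    // i.e. the vs2 operand's scalable vector type (nxv1i8) is now part
    // of the mangled intrinsic name, where the old checks had a plain
    // i64 in that position.
    return __riscv_sf_vc_v_iv_se_u8mf8(3, vs2, 10, vl);
  }
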
@@ -2308,12 +2308,12 @@ vuint64m8_t test_sf_vc_v_iv_se_u64m8(vuint64m8_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u8mf8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.iv.nxv1i8.i32.i32.i32(i32 3, <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.iv.nxv1i8.i32.nxv1i8.i32.i32(i32 3, <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.iv.nxv1i8.i64.i64.i64(i64 3, <vscale x 1 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.iv.nxv1i8.i64.nxv1i8.i64.i64(i64 3, <vscale x 1 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_sf_vc_v_iv_u8mf8(vuint8mf8_t vs2, size_t vl) {
@@ -2322,12 +2322,12 @@ vuint8mf8_t test_sf_vc_v_iv_u8mf8(vuint8mf8_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u8mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.iv.nxv2i8.i32.i32.i32(i32 3, <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.iv.nxv2i8.i32.nxv2i8.i32.i32(i32 3, <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.iv.nxv2i8.i64.i64.i64(i64 3, <vscale x 2 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.iv.nxv2i8.i64.nxv2i8.i64.i64(i64 3, <vscale x 2 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_sf_vc_v_iv_u8mf4(vuint8mf4_t vs2, size_t vl) {
@@ -2336,12 +2336,12 @@ vuint8mf4_t test_sf_vc_v_iv_u8mf4(vuint8mf4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u8mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.iv.nxv4i8.i32.i32.i32(i32 3, <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.iv.nxv4i8.i32.nxv4i8.i32.i32(i32 3, <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.iv.nxv4i8.i64.i64.i64(i64 3, <vscale x 4 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.iv.nxv4i8.i64.nxv4i8.i64.i64(i64 3, <vscale x 4 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_sf_vc_v_iv_u8mf2(vuint8mf2_t vs2, size_t vl) {
@@ -2350,12 +2350,12 @@ vuint8mf2_t test_sf_vc_v_iv_u8mf2(vuint8mf2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.iv.nxv8i8.i32.i32.i32(i32 3, <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.iv.nxv8i8.i32.nxv8i8.i32.i32(i32 3, <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.iv.nxv8i8.i64.i64.i64(i64 3, <vscale x 8 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.iv.nxv8i8.i64.nxv8i8.i64.i64(i64 3, <vscale x 8 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_sf_vc_v_iv_u8m1(vuint8m1_t vs2, size_t vl) {
@@ -2364,12 +2364,12 @@ vuint8m1_t test_sf_vc_v_iv_u8m1(vuint8m1_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u8m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.iv.nxv16i8.i32.i32.i32(i32 3, <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.iv.nxv16i8.i32.nxv16i8.i32.i32(i32 3, <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.iv.nxv16i8.i64.i64.i64(i64 3, <vscale x 16 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.iv.nxv16i8.i64.nxv16i8.i64.i64(i64 3, <vscale x 16 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_sf_vc_v_iv_u8m2(vuint8m2_t vs2, size_t vl) {
@@ -2378,12 +2378,12 @@ vuint8m2_t test_sf_vc_v_iv_u8m2(vuint8m2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u8m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.iv.nxv32i8.i32.i32.i32(i32 3, <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.iv.nxv32i8.i32.nxv32i8.i32.i32(i32 3, <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.iv.nxv32i8.i64.i64.i64(i64 3, <vscale x 32 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.iv.nxv32i8.i64.nxv32i8.i64.i64(i64 3, <vscale x 32 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_sf_vc_v_iv_u8m4(vuint8m4_t vs2, size_t vl) {
@@ -2392,12 +2392,12 @@ vuint8m4_t test_sf_vc_v_iv_u8m4(vuint8m4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u8m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.iv.nxv64i8.i32.i32.i32(i32 3, <vscale x 64 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.iv.nxv64i8.i32.nxv64i8.i32.i32(i32 3, <vscale x 64 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.iv.nxv64i8.i64.i64.i64(i64 3, <vscale x 64 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.iv.nxv64i8.i64.nxv64i8.i64.i64(i64 3, <vscale x 64 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_sf_vc_v_iv_u8m8(vuint8m8_t vs2, size_t vl) {
@@ -2406,12 +2406,12 @@ vuint8m8_t test_sf_vc_v_iv_u8m8(vuint8m8_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.iv.nxv1i16.i32.i32.i32(i32 3, <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.iv.nxv1i16.i32.nxv1i16.i32.i32(i32 3, <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.iv.nxv1i16.i64.i64.i64(i64 3, <vscale x 1 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.iv.nxv1i16.i64.nxv1i16.i64.i64(i64 3, <vscale x 1 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_sf_vc_v_iv_u16mf4(vuint16mf4_t vs2, size_t vl) {
@@ -2420,12 +2420,12 @@ vuint16mf4_t test_sf_vc_v_iv_u16mf4(vuint16mf4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.iv.nxv2i16.i32.i32.i32(i32 3, <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.iv.nxv2i16.i32.nxv2i16.i32.i32(i32 3, <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.iv.nxv2i16.i64.i64.i64(i64 3, <vscale x 2 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.iv.nxv2i16.i64.nxv2i16.i64.i64(i64 3, <vscale x 2 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_sf_vc_v_iv_u16mf2(vuint16mf2_t vs2, size_t vl) {
@@ -2434,12 +2434,12 @@ vuint16mf2_t test_sf_vc_v_iv_u16mf2(vuint16mf2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.iv.nxv4i16.i32.i32.i32(i32 3, <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.iv.nxv4i16.i32.nxv4i16.i32.i32(i32 3, <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.iv.nxv4i16.i64.i64.i64(i64 3, <vscale x 4 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.iv.nxv4i16.i64.nxv4i16.i64.i64(i64 3, <vscale x 4 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_sf_vc_v_iv_u16m1(vuint16m1_t vs2, size_t vl) {
@@ -2448,12 +2448,12 @@ vuint16m1_t test_sf_vc_v_iv_u16m1(vuint16m1_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.iv.nxv8i16.i32.i32.i32(i32 3, <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.iv.nxv8i16.i32.nxv8i16.i32.i32(i32 3, <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.iv.nxv8i16.i64.i64.i64(i64 3, <vscale x 8 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.iv.nxv8i16.i64.nxv8i16.i64.i64(i64 3, <vscale x 8 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_sf_vc_v_iv_u16m2(vuint16m2_t vs2, size_t vl) {
@@ -2462,12 +2462,12 @@ vuint16m2_t test_sf_vc_v_iv_u16m2(vuint16m2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.iv.nxv16i16.i32.i32.i32(i32 3, <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.iv.nxv16i16.i32.nxv16i16.i32.i32(i32 3, <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.iv.nxv16i16.i64.i64.i64(i64 3, <vscale x 16 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.iv.nxv16i16.i64.nxv16i16.i64.i64(i64 3, <vscale x 16 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_sf_vc_v_iv_u16m4(vuint16m4_t vs2, size_t vl) {
@@ -2476,12 +2476,12 @@ vuint16m4_t test_sf_vc_v_iv_u16m4(vuint16m4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u16m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.iv.nxv32i16.i32.i32.i32(i32 3, <vscale x 32 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.iv.nxv32i16.i32.nxv32i16.i32.i32(i32 3, <vscale x 32 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.iv.nxv32i16.i64.i64.i64(i64 3, <vscale x 32 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.iv.nxv32i16.i64.nxv32i16.i64.i64(i64 3, <vscale x 32 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_sf_vc_v_iv_u16m8(vuint16m8_t vs2, size_t vl) {
@@ -2490,12 +2490,12 @@ vuint16m8_t test_sf_vc_v_iv_u16m8(vuint16m8_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.iv.nxv1i32.i32.i32.i32(i32 3, <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.iv.nxv1i32.i32.nxv1i32.i32.i32(i32 3, <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.iv.nxv1i32.i64.i64.i64(i64 3, <vscale x 1 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.iv.nxv1i32.i64.nxv1i32.i64.i64(i64 3, <vscale x 1 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_sf_vc_v_iv_u32mf2(vuint32mf2_t vs2, size_t vl) {
@@ -2504,12 +2504,12 @@ vuint32mf2_t test_sf_vc_v_iv_u32mf2(vuint32mf2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.iv.nxv2i32.i32.i32.i32(i32 3, <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.iv.nxv2i32.i32.nxv2i32.i32.i32(i32 3, <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.iv.nxv2i32.i64.i64.i64(i64 3, <vscale x 2 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.iv.nxv2i32.i64.nxv2i32.i64.i64(i64 3, <vscale x 2 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_sf_vc_v_iv_u32m1(vuint32m1_t vs2, size_t vl) {
@@ -2518,12 +2518,12 @@ vuint32m1_t test_sf_vc_v_iv_u32m1(vuint32m1_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.nxv4i32.i32.i32.i32(i32 3, <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.nxv4i32.i32.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.nxv4i32.i64.i64.i64(i64 3, <vscale x 4 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.nxv4i32.i64.nxv4i32.i64.i64(i64 3, <vscale x 4 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_sf_vc_v_iv_u32m2(vuint32m2_t vs2, size_t vl) {
@@ -2532,12 +2532,12 @@ vuint32m2_t test_sf_vc_v_iv_u32m2(vuint32m2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.iv.nxv8i32.i32.i32.i32(i32 3, <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.iv.nxv8i32.i32.nxv8i32.i32.i32(i32 3, <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.iv.nxv8i32.i64.i64.i64(i64 3, <vscale x 8 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.iv.nxv8i32.i64.nxv8i32.i64.i64(i64 3, <vscale x 8 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_sf_vc_v_iv_u32m4(vuint32m4_t vs2, size_t vl) {
@@ -2546,12 +2546,12 @@ vuint32m4_t test_sf_vc_v_iv_u32m4(vuint32m4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u32m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.iv.nxv16i32.i32.i32.i32(i32 3, <vscale x 16 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.iv.nxv16i32.i32.nxv16i32.i32.i32(i32 3, <vscale x 16 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.iv.nxv16i32.i64.i64.i64(i64 3, <vscale x 16 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.iv.nxv16i32.i64.nxv16i32.i64.i64(i64 3, <vscale x 16 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_sf_vc_v_iv_u32m8(vuint32m8_t vs2, size_t vl) {
@@ -2560,12 +2560,12 @@ vuint32m8_t test_sf_vc_v_iv_u32m8(vuint32m8_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u64m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.iv.nxv1i64.i32.i32.i32(i32 3, <vscale x 1 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.iv.nxv1i64.i32.nxv1i64.i32.i32(i32 3, <vscale x 1 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.iv.nxv1i64.i64.i64.i64(i64 3, <vscale x 1 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.iv.nxv1i64.i64.nxv1i64.i64.i64(i64 3, <vscale x 1 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_iv_u64m1(vuint64m1_t vs2, size_t vl) {
@@ -2574,12 +2574,12 @@ vuint64m1_t test_sf_vc_v_iv_u64m1(vuint64m1_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u64m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.iv.nxv2i64.i32.i32.i32(i32 3, <vscale x 2 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.iv.nxv2i64.i32.nxv2i64.i32.i32(i32 3, <vscale x 2 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.iv.nxv2i64.i64.i64.i64(i64 3, <vscale x 2 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.iv.nxv2i64.i64.nxv2i64.i64.i64(i64 3, <vscale x 2 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_iv_u64m2(vuint64m2_t vs2, size_t vl) {
@@ -2588,12 +2588,12 @@ vuint64m2_t test_sf_vc_v_iv_u64m2(vuint64m2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u64m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.iv.nxv4i64.i32.i32.i32(i32 3, <vscale x 4 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.iv.nxv4i64.i32.nxv4i64.i32.i32(i32 3, <vscale x 4 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.iv.nxv4i64.i64.i64.i64(i64 3, <vscale x 4 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.iv.nxv4i64.i64.nxv4i64.i64.i64(i64 3, <vscale x 4 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_iv_u64m4(vuint64m4_t vs2, size_t vl) {
@@ -2602,12 +2602,12 @@ vuint64m4_t test_sf_vc_v_iv_u64m4(vuint64m4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u64m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.iv.nxv8i64.i32.i32.i32(i32 3, <vscale x 8 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.iv.nxv8i64.i32.nxv8i64.i32.i32(i32 3, <vscale x 8 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.iv.nxv8i64.i64.i64.i64(i64 3, <vscale x 8 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.iv.nxv8i64.i64.nxv8i64.i64.i64(i64 3, <vscale x 8 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_iv_u64m8(vuint64m8_t vs2, size_t vl) {
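
The group that just ended differs from the .se group above only in the intrinsic's base name: the variant without the side-effect marker drops ".se." but carries the identical type mangling (compare @llvm.riscv.sf.vc.v.iv.nxv1i8.i64.nxv1i8.i64.i64 with @llvm.riscv.sf.vc.v.iv.se.nxv1i8.i64.nxv1i8.i64.i64). The corresponding call, under the same assumptions as the sketch above:

  #include <riscv_vector.h>
  #include <sifive_vector.h>

  vuint8mf8_t demo_sf_vc_v_iv(vuint8mf8_t vs2, size_t vl) {
    // Same operands and mangling as the .se sketch earlier; only the
    // side-effect marker is gone from the intrinsic name.
    return __riscv_sf_vc_v_iv_u8mf8(3, vs2, 10, vl);
  }
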
@@ -2826,12 +2826,12 @@ void test_sf_vc_fv_se_u64m8(vuint64m8_t vs2, double fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv1i16.i32.f16.i32(i32 1, <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv1i16.i32.nxv1i16.f16.i32(i32 1, <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv1i16.i64.f16.i64(i64 1, <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv1i16.i64.nxv1i16.f16.i64(i64 1, <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_sf_vc_v_fv_se_u16mf4(vuint16mf4_t vs2, _Float16 fs1, size_t vl) {
@@ -2840,12 +2840,12 @@ vuint16mf4_t test_sf_vc_v_fv_se_u16mf4(vuint16mf4_t vs2, _Float16 fs1, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv2i16.i32.f16.i32(i32 1, <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv2i16.i32.nxv2i16.f16.i32(i32 1, <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv2i16.i64.f16.i64(i64 1, <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv2i16.i64.nxv2i16.f16.i64(i64 1, <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_sf_vc_v_fv_se_u16mf2(vuint16mf2_t vs2, _Float16 fs1, size_t vl) {
@@ -2854,12 +2854,12 @@ vuint16mf2_t test_sf_vc_v_fv_se_u16mf2(vuint16mf2_t vs2, _Float16 fs1, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv4i16.i32.f16.i32(i32 1, <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv4i16.i32.nxv4i16.f16.i32(i32 1, <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv4i16.i64.f16.i64(i64 1, <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv4i16.i64.nxv4i16.f16.i64(i64 1, <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_sf_vc_v_fv_se_u16m1(vuint16m1_t vs2, _Float16 fs1, size_t vl) {
@@ -2868,12 +2868,12 @@ vuint16m1_t test_sf_vc_v_fv_se_u16m1(vuint16m1_t vs2, _Float16 fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv8i16.i32.f16.i32(i32 1, <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv8i16.i32.nxv8i16.f16.i32(i32 1, <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv8i16.i64.f16.i64(i64 1, <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv8i16.i64.nxv8i16.f16.i64(i64 1, <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_sf_vc_v_fv_se_u16m2(vuint16m2_t vs2, _Float16 fs1, size_t vl) {
@@ -2882,12 +2882,12 @@ vuint16m2_t test_sf_vc_v_fv_se_u16m2(vuint16m2_t vs2, _Float16 fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv16i16.i32.f16.i32(i32 1, <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv16i16.i32.nxv16i16.f16.i32(i32 1, <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv16i16.i64.f16.i64(i64 1, <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv16i16.i64.nxv16i16.f16.i64(i64 1, <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_sf_vc_v_fv_se_u16m4(vuint16m4_t vs2, _Float16 fs1, size_t vl) {
@@ -2896,12 +2896,12 @@ vuint16m4_t test_sf_vc_v_fv_se_u16m4(vuint16m4_t vs2, _Float16 fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u16m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv32i16.i32.f16.i32(i32 1, <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv32i16.i32.nxv32i16.f16.i32(i32 1, <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv32i16.i64.f16.i64(i64 1, <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv32i16.i64.nxv32i16.f16.i64(i64 1, <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_sf_vc_v_fv_se_u16m8(vuint16m8_t vs2, _Float16 fs1, size_t vl) {
@@ -2910,12 +2910,12 @@ vuint16m8_t test_sf_vc_v_fv_se_u16m8(vuint16m8_t vs2, _Float16 fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv1i32.i32.f32.i32(i32 1, <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv1i32.i32.nxv1i32.f32.i32(i32 1, <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv1i32.i64.f32.i64(i64 1, <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv1i32.i64.nxv1i32.f32.i64(i64 1, <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_sf_vc_v_fv_se_u32mf2(vuint32mf2_t vs2, float fs1, size_t vl) {
@@ -2924,12 +2924,12 @@ vuint32mf2_t test_sf_vc_v_fv_se_u32mf2(vuint32mf2_t vs2, float fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv2i32.i32.f32.i32(i32 1, <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv2i32.i32.nxv2i32.f32.i32(i32 1, <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv2i32.i64.f32.i64(i64 1, <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv2i32.i64.nxv2i32.f32.i64(i64 1, <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_sf_vc_v_fv_se_u32m1(vuint32m1_t vs2, float fs1, size_t vl) {
@@ -2938,12 +2938,12 @@ vuint32m1_t test_sf_vc_v_fv_se_u32m1(vuint32m1_t vs2, float fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv4i32.i32.f32.i32(i32 1, <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv4i32.i32.nxv4i32.f32.i32(i32 1, <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv4i32.i64.f32.i64(i64 1, <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv4i32.i64.nxv4i32.f32.i64(i64 1, <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_sf_vc_v_fv_se_u32m2(vuint32m2_t vs2, float fs1, size_t vl) {
@@ -2952,12 +2952,12 @@ vuint32m2_t test_sf_vc_v_fv_se_u32m2(vuint32m2_t vs2, float fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv8i32.i32.f32.i32(i32 1, <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv8i32.i32.nxv8i32.f32.i32(i32 1, <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv8i32.i64.f32.i64(i64 1, <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv8i32.i64.nxv8i32.f32.i64(i64 1, <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_sf_vc_v_fv_se_u32m4(vuint32m4_t vs2, float fs1, size_t vl) {
@@ -2966,12 +2966,12 @@ vuint32m4_t test_sf_vc_v_fv_se_u32m4(vuint32m4_t vs2, float fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u32m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv16i32.i32.f32.i32(i32 1, <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv16i32.i32.nxv16i32.f32.i32(i32 1, <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv16i32.i64.f32.i64(i64 1, <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv16i32.i64.nxv16i32.f32.i64(i64 1, <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_sf_vc_v_fv_se_u32m8(vuint32m8_t vs2, float fs1, size_t vl) {
@@ -2980,12 +2980,12 @@ vuint32m8_t test_sf_vc_v_fv_se_u32m8(vuint32m8_t vs2, float fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u64m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv1i64.i32.f64.i32(i32 1, <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv1i64.i32.nxv1i64.f64.i32(i32 1, <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv1i64.i64.f64.i64(i64 1, <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv1i64.i64.nxv1i64.f64.i64(i64 1, <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_fv_se_u64m1(vuint64m1_t vs2, double fs1, size_t vl) {
@@ -2994,12 +2994,12 @@ vuint64m1_t test_sf_vc_v_fv_se_u64m1(vuint64m1_t vs2, double fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u64m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv2i64.i32.f64.i32(i32 1, <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv2i64.i32.nxv2i64.f64.i32(i32 1, <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv2i64.i64.f64.i64(i64 1, <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv2i64.i64.nxv2i64.f64.i64(i64 1, <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_fv_se_u64m2(vuint64m2_t vs2, double fs1, size_t vl) {
@@ -3008,12 +3008,12 @@ vuint64m2_t test_sf_vc_v_fv_se_u64m2(vuint64m2_t vs2, double fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u64m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv4i64.i32.f64.i32(i32 1, <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv4i64.i32.nxv4i64.f64.i32(i32 1, <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv4i64.i64.f64.i64(i64 1, <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv4i64.i64.nxv4i64.f64.i64(i64 1, <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_fv_se_u64m4(vuint64m4_t vs2, double fs1, size_t vl) {
@@ -3022,12 +3022,12 @@ vuint64m4_t test_sf_vc_v_fv_se_u64m4(vuint64m4_t vs2, double fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u64m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv8i64.i32.f64.i32(i32 1, <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv8i64.i32.nxv8i64.f64.i32(i32 1, <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv8i64.i64.f64.i64(i64 1, <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv8i64.i64.nxv8i64.f64.i64(i64 1, <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_fv_se_u64m8(vuint64m8_t vs2, double fs1, size_t vl) {
@@ -3036,12 +3036,12 @@ vuint64m8_t test_sf_vc_v_fv_se_u64m8(vuint64m8_t vs2, double fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fv.nxv1i16.i32.f16.i32(i32 1, <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fv.nxv1i16.i32.nxv1i16.f16.i32(i32 1, <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fv.nxv1i16.i64.f16.i64(i64 1, <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fv.nxv1i16.i64.nxv1i16.f16.i64(i64 1, <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_sf_vc_v_fv_u16mf4(vuint16mf4_t vs2, _Float16 fs1, size_t vl) {
@@ -3050,12 +3050,12 @@ vuint16mf4_t test_sf_vc_v_fv_u16mf4(vuint16mf4_t vs2, _Float16 fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fv.nxv2i16.i32.f16.i32(i32 1, <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fv.nxv2i16.i32.nxv2i16.f16.i32(i32 1, <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fv.nxv2i16.i64.f16.i64(i64 1, <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fv.nxv2i16.i64.nxv2i16.f16.i64(i64 1, <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_sf_vc_v_fv_u16mf2(vuint16mf2_t vs2, _Float16 fs1, size_t vl) {
@@ -3064,12 +3064,12 @@ vuint16mf2_t test_sf_vc_v_fv_u16mf2(vuint16mf2_t vs2, _Float16 fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fv.nxv4i16.i32.f16.i32(i32 1, <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fv.nxv4i16.i32.nxv4i16.f16.i32(i32 1, <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fv.nxv4i16.i64.f16.i64(i64 1, <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fv.nxv4i16.i64.nxv4i16.f16.i64(i64 1, <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_sf_vc_v_fv_u16m1(vuint16m1_t vs2, _Float16 fs1, size_t vl) {
@@ -3078,12 +3078,12 @@ vuint16m1_t test_sf_vc_v_fv_u16m1(vuint16m1_t vs2, _Float16 fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fv.nxv8i16.i32.f16.i32(i32 1, <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fv.nxv8i16.i32.nxv8i16.f16.i32(i32 1, <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fv.nxv8i16.i64.f16.i64(i64 1, <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fv.nxv8i16.i64.nxv8i16.f16.i64(i64 1, <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_sf_vc_v_fv_u16m2(vuint16m2_t vs2, _Float16 fs1, size_t vl) {
@@ -3092,12 +3092,12 @@ vuint16m2_t test_sf_vc_v_fv_u16m2(vuint16m2_t vs2, _Float16 fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fv.nxv16i16.i32.f16.i32(i32 1, <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fv.nxv16i16.i32.nxv16i16.f16.i32(i32 1, <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fv.nxv16i16.i64.f16.i64(i64 1, <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fv.nxv16i16.i64.nxv16i16.f16.i64(i64 1, <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_sf_vc_v_fv_u16m4(vuint16m4_t vs2, _Float16 fs1, size_t vl) {
@@ -3106,12 +3106,12 @@ vuint16m4_t test_sf_vc_v_fv_u16m4(vuint16m4_t vs2, _Float16 fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u16m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fv.nxv32i16.i32.f16.i32(i32 1, <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fv.nxv32i16.i32.nxv32i16.f16.i32(i32 1, <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fv.nxv32i16.i64.f16.i64(i64 1, <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fv.nxv32i16.i64.nxv32i16.f16.i64(i64 1, <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_sf_vc_v_fv_u16m8(vuint16m8_t vs2, _Float16 fs1, size_t vl) {
@@ -3120,12 +3120,12 @@ vuint16m8_t test_sf_vc_v_fv_u16m8(vuint16m8_t vs2, _Float16 fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fv.nxv1i32.i32.f32.i32(i32 1, <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fv.nxv1i32.i32.nxv1i32.f32.i32(i32 1, <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fv.nxv1i32.i64.f32.i64(i64 1, <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fv.nxv1i32.i64.nxv1i32.f32.i64(i64 1, <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_sf_vc_v_fv_u32mf2(vuint32mf2_t vs2, float fs1, size_t vl) {
@@ -3134,12 +3134,12 @@ vuint32mf2_t test_sf_vc_v_fv_u32mf2(vuint32mf2_t vs2, float fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fv.nxv2i32.i32.f32.i32(i32 1, <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fv.nxv2i32.i32.nxv2i32.f32.i32(i32 1, <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fv.nxv2i32.i64.f32.i64(i64 1, <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fv.nxv2i32.i64.nxv2i32.f32.i64(i64 1, <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_sf_vc_v_fv_u32m1(vuint32m1_t vs2, float fs1, size_t vl) {
@@ -3148,12 +3148,12 @@ vuint32m1_t test_sf_vc_v_fv_u32m1(vuint32m1_t vs2, float fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.nxv4i32.i32.f32.i32(i32 1, <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.nxv4i32.i32.nxv4i32.f32.i32(i32 1, <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.nxv4i32.i64.f32.i64(i64 1, <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.nxv4i32.i64.nxv4i32.f32.i64(i64 1, <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_sf_vc_v_fv_u32m2(vuint32m2_t vs2, float fs1, size_t vl) {
@@ -3162,12 +3162,12 @@ vuint32m2_t test_sf_vc_v_fv_u32m2(vuint32m2_t vs2, float fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fv.nxv8i32.i32.f32.i32(i32 1, <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fv.nxv8i32.i32.nxv8i32.f32.i32(i32 1, <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fv.nxv8i32.i64.f32.i64(i64 1, <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fv.nxv8i32.i64.nxv8i32.f32.i64(i64 1, <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_sf_vc_v_fv_u32m4(vuint32m4_t vs2, float fs1, size_t vl) {
@@ -3176,12 +3176,12 @@ vuint32m4_t test_sf_vc_v_fv_u32m4(vuint32m4_t vs2, float fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u32m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fv.nxv16i32.i32.f32.i32(i32 1, <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fv.nxv16i32.i32.nxv16i32.f32.i32(i32 1, <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fv.nxv16i32.i64.f32.i64(i64 1, <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fv.nxv16i32.i64.nxv16i32.f32.i64(i64 1, <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_sf_vc_v_fv_u32m8(vuint32m8_t vs2, float fs1, size_t vl) {
@@ -3190,12 +3190,12 @@ vuint32m8_t test_sf_vc_v_fv_u32m8(vuint32m8_t vs2, float fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u64m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fv.nxv1i64.i32.f64.i32(i32 1, <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fv.nxv1i64.i32.nxv1i64.f64.i32(i32 1, <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fv.nxv1i64.i64.f64.i64(i64 1, <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fv.nxv1i64.i64.nxv1i64.f64.i64(i64 1, <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_fv_u64m1(vuint64m1_t vs2, double fs1, size_t vl) {
@@ -3204,12 +3204,12 @@ vuint64m1_t test_sf_vc_v_fv_u64m1(vuint64m1_t vs2, double fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u64m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fv.nxv2i64.i32.f64.i32(i32 1, <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fv.nxv2i64.i32.nxv2i64.f64.i32(i32 1, <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fv.nxv2i64.i64.f64.i64(i64 1, <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fv.nxv2i64.i64.nxv2i64.f64.i64(i64 1, <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_fv_u64m2(vuint64m2_t vs2, double fs1, size_t vl) {
@@ -3218,12 +3218,12 @@ vuint64m2_t test_sf_vc_v_fv_u64m2(vuint64m2_t vs2, double fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u64m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fv.nxv4i64.i32.f64.i32(i32 1, <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fv.nxv4i64.i32.nxv4i64.f64.i32(i32 1, <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fv.nxv4i64.i64.f64.i64(i64 1, <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fv.nxv4i64.i64.nxv4i64.f64.i64(i64 1, <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_fv_u64m4(vuint64m4_t vs2, double fs1, size_t vl) {
@@ -3232,12 +3232,12 @@ vuint64m4_t test_sf_vc_v_fv_u64m4(vuint64m4_t vs2, double fs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u64m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.nxv8i64.i32.f64.i32(i32 1, <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.nxv8i64.i32.nxv8i64.f64.i32(i32 1, <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.nxv8i64.i64.f64.i64(i64 1, <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.nxv8i64.i64.nxv8i64.f64.i64(i64 1, <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_fv_u64m8(vuint64m8_t vs2, double fs1, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv-rv64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv-rv64.c
index 8b0c73776948e60..eac2ba32d5b30b1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv-rv64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv-rv64.c
@@ -8,7 +8,7 @@
 
 // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv1i64.i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv1i64.nxv1i64.i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_xvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
@@ -17,7 +17,7 @@ void test_sf_vc_xvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size
 
 // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv2i64.i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv2i64.nxv2i64.i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_xvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
@@ -26,7 +26,7 @@ void test_sf_vc_xvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size
 
 // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv4i64.i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv4i64.nxv4i64.i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_xvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
@@ -35,7 +35,7 @@ void test_sf_vc_xvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size
 
 // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv8i64.i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv8i64.nxv8i64.i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_xvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
@@ -44,7 +44,7 @@ void test_sf_vc_xvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size
 
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvv.se.nxv1i64.i64.i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvv.se.nxv1i64.i64.nxv1i64.nxv1i64.i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_xvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
@@ -53,7 +53,7 @@ vuint64m1_t test_sf_vc_v_xvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, uint64_t
 
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvv.se.nxv2i64.i64.i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvv.se.nxv2i64.i64.nxv2i64.nxv2i64.i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_xvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
@@ -62,7 +62,7 @@ vuint64m2_t test_sf_vc_v_xvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, uint64_t
 
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvv.se.nxv4i64.i64.i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvv.se.nxv4i64.i64.nxv4i64.nxv4i64.i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_xvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
@@ -71,7 +71,7 @@ vuint64m4_t test_sf_vc_v_xvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, uint64_t
 
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvv.se.nxv8i64.i64.i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvv.se.nxv8i64.i64.nxv8i64.nxv8i64.i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_xvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
@@ -80,7 +80,7 @@ vuint64m8_t test_sf_vc_v_xvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, uint64_t
 
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvv.nxv1i64.i64.i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvv.nxv1i64.i64.nxv1i64.nxv1i64.i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_xvv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
@@ -89,7 +89,7 @@ vuint64m1_t test_sf_vc_v_xvv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1
 
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvv.nxv2i64.i64.i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvv.nxv2i64.i64.nxv2i64.nxv2i64.i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_xvv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
@@ -98,7 +98,7 @@ vuint64m2_t test_sf_vc_v_xvv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1
 
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvv.nxv4i64.i64.i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvv.nxv4i64.i64.nxv4i64.nxv4i64.i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_xvv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
@@ -107,7 +107,7 @@ vuint64m4_t test_sf_vc_v_xvv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1
 
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvv.nxv8i64.i64.i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvv.nxv8i64.i64.nxv8i64.nxv8i64.i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_xvv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv.c
index 4efd7da81bac450..10169692072e2d9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv.c
@@ -11,12 +11,12 @@
 
 // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8mf8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i8.nxv1i8.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i8.nxv1i8.nxv1i8.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv1i8.nxv1i8.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv1i8.nxv1i8.nxv1i8.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_vvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
@@ -25,12 +25,12 @@ void test_sf_vc_vvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i8.nxv2i8.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i8.nxv2i8.nxv2i8.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv2i8.nxv2i8.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv2i8.nxv2i8.nxv2i8.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_vvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
@@ -39,12 +39,12 @@ void test_sf_vc_vvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i8.nxv4i8.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i8.nxv4i8.nxv4i8.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv4i8.nxv4i8.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv4i8.nxv4i8.nxv4i8.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_vvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
@@ -53,12 +53,12 @@ void test_sf_vc_vvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i8.nxv8i8.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i8.nxv8i8.nxv8i8.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv8i8.nxv8i8.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv8i8.nxv8i8.nxv8i8.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_vvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
@@ -67,12 +67,12 @@ void test_sf_vc_vvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_
 
 // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv16i8.nxv16i8.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv16i8.nxv16i8.nxv16i8.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv16i8.nxv16i8.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv16i8.nxv16i8.nxv16i8.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_vvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
@@ -81,12 +81,12 @@ void test_sf_vc_vvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_
 
 // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv32i8.nxv32i8.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv32i8.nxv32i8.nxv32i8.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv32i8.nxv32i8.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv32i8.nxv32i8.nxv32i8.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_vvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
@@ -95,12 +95,12 @@ void test_sf_vc_vvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_
 
 // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv64i8.nxv64i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv64i8.nxv64i8.nxv64i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv64i8.nxv64i8.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv64i8.nxv64i8.nxv64i8.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_vvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
@@ -109,12 +109,12 @@ void test_sf_vc_vvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_
 
 // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i16.nxv1i16.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i16.nxv1i16.nxv1i16.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv1i16.nxv1i16.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv1i16.nxv1i16.nxv1i16.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_vvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
@@ -123,12 +123,12 @@ void test_sf_vc_vvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs
 
 // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i16.nxv2i16.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i16.nxv2i16.nxv2i16.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv2i16.nxv2i16.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv2i16.nxv2i16.nxv2i16.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_vvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
@@ -137,12 +137,12 @@ void test_sf_vc_vvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs
 
 // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i16.nxv4i16.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i16.nxv4i16.nxv4i16.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv4i16.nxv4i16.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv4i16.nxv4i16.nxv4i16.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_vvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
@@ -151,12 +151,12 @@ void test_sf_vc_vvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i16.nxv8i16.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i16.nxv8i16.nxv8i16.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv8i16.nxv8i16.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv8i16.nxv8i16.nxv8i16.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_vvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
@@ -165,12 +165,12 @@ void test_sf_vc_vvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv16i16.nxv16i16.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv16i16.nxv16i16.nxv16i16.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv16i16.nxv16i16.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv16i16.nxv16i16.nxv16i16.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_vvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
@@ -179,12 +179,12 @@ void test_sf_vc_vvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u16m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv32i16.nxv32i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv32i16.nxv32i16.nxv32i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv32i16.nxv32i16.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv32i16.nxv32i16.nxv32i16.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_vvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
@@ -193,12 +193,12 @@ void test_sf_vc_vvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i32.nxv1i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i32.nxv1i32.nxv1i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv1i32.nxv1i32.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv1i32.nxv1i32.nxv1i32.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_vvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
@@ -207,12 +207,12 @@ void test_sf_vc_vvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs
 
 // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i32.nxv2i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i32.nxv2i32.nxv2i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv2i32.nxv2i32.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv2i32.nxv2i32.nxv2i32.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_vvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
@@ -221,12 +221,12 @@ void test_sf_vc_vvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i32.nxv4i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i32.nxv4i32.nxv4i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv4i32.nxv4i32.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv4i32.nxv4i32.nxv4i32.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_vvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
@@ -235,12 +235,12 @@ void test_sf_vc_vvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i32.nxv8i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i32.nxv8i32.nxv8i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv8i32.nxv8i32.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv8i32.nxv8i32.nxv8i32.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_vvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
@@ -249,12 +249,12 @@ void test_sf_vc_vvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u32m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv16i32.nxv16i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv16i32.nxv16i32.nxv16i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv16i32.nxv16i32.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv16i32.nxv16i32.nxv16i32.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_vvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
@@ -263,12 +263,12 @@ void test_sf_vc_vvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u64m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i64.nxv1i64.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i64.nxv1i64.nxv1i64.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv1i64.nxv1i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv1i64.nxv1i64.nxv1i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_vvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
@@ -277,12 +277,12 @@ void test_sf_vc_vvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u64m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i64.nxv2i64.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i64.nxv2i64.nxv2i64.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv2i64.nxv2i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv2i64.nxv2i64.nxv2i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_vvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
@@ -291,12 +291,12 @@ void test_sf_vc_vvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u64m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i64.nxv4i64.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i64.nxv4i64.nxv4i64.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv4i64.nxv4i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv4i64.nxv4i64.nxv4i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_vvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
@@ -305,12 +305,12 @@ void test_sf_vc_vvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u64m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i64.nxv8i64.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i64.nxv8i64.nxv8i64.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv8i64.nxv8i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv8i64.nxv8i64.nxv8i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_vvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
@@ -319,12 +319,12 @@ void test_sf_vc_vvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8mf8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.i32.nxv1i8.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.i32.nxv1i8.nxv1i8.nxv1i8.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.i64.nxv1i8.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.i64.nxv1i8.nxv1i8.nxv1i8.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_sf_vc_v_vvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
@@ -333,12 +333,12 @@ vuint8mf8_t test_sf_vc_v_vvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.i32.nxv2i8.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.i32.nxv2i8.nxv2i8.nxv2i8.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.i64.nxv2i8.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.i64.nxv2i8.nxv2i8.nxv2i8.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_sf_vc_v_vvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
@@ -347,12 +347,12 @@ vuint8mf4_t test_sf_vc_v_vvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.i32.nxv4i8.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.i32.nxv4i8.nxv4i8.nxv4i8.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.i64.nxv4i8.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.i64.nxv4i8.nxv4i8.nxv4i8.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_sf_vc_v_vvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
@@ -361,12 +361,12 @@ vuint8mf2_t test_sf_vc_v_vvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.i32.nxv8i8.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.i32.nxv8i8.nxv8i8.nxv8i8.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.i64.nxv8i8.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.i64.nxv8i8.nxv8i8.nxv8i8.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_sf_vc_v_vvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
@@ -375,12 +375,12 @@ vuint8m1_t test_sf_vc_v_vvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.i32.nxv16i8.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.i32.nxv16i8.nxv16i8.nxv16i8.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.i64.nxv16i8.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.i64.nxv16i8.nxv16i8.nxv16i8.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_sf_vc_v_vvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
@@ -389,12 +389,12 @@ vuint8m2_t test_sf_vc_v_vvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.i32.nxv32i8.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.i32.nxv32i8.nxv32i8.nxv32i8.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.i64.nxv32i8.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.i64.nxv32i8.nxv32i8.nxv32i8.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_sf_vc_v_vvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
@@ -403,12 +403,12 @@ vuint8m4_t test_sf_vc_v_vvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.i32.nxv64i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.i32.nxv64i8.nxv64i8.nxv64i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.i64.nxv64i8.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.i64.nxv64i8.nxv64i8.nxv64i8.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_sf_vc_v_vvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
@@ -417,12 +417,12 @@ vuint8m8_t test_sf_vc_v_vvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.i32.nxv1i16.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.i32.nxv1i16.nxv1i16.nxv1i16.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.i64.nxv1i16.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.i64.nxv1i16.nxv1i16.nxv1i16.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_sf_vc_v_vvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
@@ -431,12 +431,12 @@ vuint16mf4_t test_sf_vc_v_vvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, vuint
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.i32.nxv2i16.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.i32.nxv2i16.nxv2i16.nxv2i16.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.i64.nxv2i16.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.i64.nxv2i16.nxv2i16.nxv2i16.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_sf_vc_v_vvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
@@ -445,12 +445,12 @@ vuint16mf2_t test_sf_vc_v_vvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, vuint
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.i32.nxv4i16.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.i32.nxv4i16.nxv4i16.nxv4i16.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.i64.nxv4i16.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.i64.nxv4i16.nxv4i16.nxv4i16.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_sf_vc_v_vvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
@@ -459,12 +459,12 @@ vuint16m1_t test_sf_vc_v_vvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.i32.nxv8i16.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.i32.nxv8i16.nxv8i16.nxv8i16.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.i64.nxv8i16.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.i64.nxv8i16.nxv8i16.nxv8i16.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_sf_vc_v_vvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
@@ -473,12 +473,12 @@ vuint16m2_t test_sf_vc_v_vvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.i32.nxv16i16.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.i32.nxv16i16.nxv16i16.nxv16i16.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.i64.nxv16i16.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.i64.nxv16i16.nxv16i16.nxv16i16.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_sf_vc_v_vvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
@@ -487,12 +487,12 @@ vuint16m4_t test_sf_vc_v_vvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u16m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.i32.nxv32i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.i32.nxv32i16.nxv32i16.nxv32i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.i64.nxv32i16.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.i64.nxv32i16.nxv32i16.nxv32i16.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_sf_vc_v_vvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
@@ -501,12 +501,12 @@ vuint16m8_t test_sf_vc_v_vvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.i32.nxv1i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.i32.nxv1i32.nxv1i32.nxv1i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.i64.nxv1i32.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.i64.nxv1i32.nxv1i32.nxv1i32.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_sf_vc_v_vvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
@@ -515,12 +515,12 @@ vuint32mf2_t test_sf_vc_v_vvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.i32.nxv2i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.i32.nxv2i32.nxv2i32.nxv2i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.i64.nxv2i32.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.i64.nxv2i32.nxv2i32.nxv2i32.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_sf_vc_v_vvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
@@ -529,12 +529,12 @@ vuint32m1_t test_sf_vc_v_vvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.i32.nxv4i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.i32.nxv4i32.nxv4i32.nxv4i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.i64.nxv4i32.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.i64.nxv4i32.nxv4i32.nxv4i32.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_sf_vc_v_vvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
@@ -543,12 +543,12 @@ vuint32m2_t test_sf_vc_v_vvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.i32.nxv8i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.i32.nxv8i32.nxv8i32.nxv8i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.i64.nxv8i32.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.i64.nxv8i32.nxv8i32.nxv8i32.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_sf_vc_v_vvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
@@ -557,12 +557,12 @@ vuint32m4_t test_sf_vc_v_vvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u32m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.i32.nxv16i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.i32.nxv16i32.nxv16i32.nxv16i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.i64.nxv16i32.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.i64.nxv16i32.nxv16i32.nxv16i32.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_sf_vc_v_vvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
@@ -571,12 +571,12 @@ vuint32m8_t test_sf_vc_v_vvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u64m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.i32.nxv1i64.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.i32.nxv1i64.nxv1i64.nxv1i64.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.i64.nxv1i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.i64.nxv1i64.nxv1i64.nxv1i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_vvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
@@ -585,12 +585,12 @@ vuint64m1_t test_sf_vc_v_vvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u64m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.i32.nxv2i64.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.i32.nxv2i64.nxv2i64.nxv2i64.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.i64.nxv2i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.i64.nxv2i64.nxv2i64.nxv2i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_vvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
@@ -599,12 +599,12 @@ vuint64m2_t test_sf_vc_v_vvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u64m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.i32.nxv4i64.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.i32.nxv4i64.nxv4i64.nxv4i64.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.i64.nxv4i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.i64.nxv4i64.nxv4i64.nxv4i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_vvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
@@ -613,12 +613,12 @@ vuint64m4_t test_sf_vc_v_vvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u64m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.i32.nxv8i64.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.i32.nxv8i64.nxv8i64.nxv8i64.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.i64.nxv8i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.i64.nxv8i64.nxv8i64.nxv8i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_vvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
@@ -627,12 +627,12 @@ vuint64m8_t test_sf_vc_v_vvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8mf8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.nxv1i8.i32.nxv1i8.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.nxv1i8.i32.nxv1i8.nxv1i8.nxv1i8.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.nxv1i8.i64.nxv1i8.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.nxv1i8.i64.nxv1i8.nxv1i8.nxv1i8.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_sf_vc_v_vvv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
@@ -641,12 +641,12 @@ vuint8mf8_t test_sf_vc_v_vvv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.nxv2i8.i32.nxv2i8.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.nxv2i8.i32.nxv2i8.nxv2i8.nxv2i8.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.nxv2i8.i64.nxv2i8.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.nxv2i8.i64.nxv2i8.nxv2i8.nxv2i8.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_sf_vc_v_vvv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
@@ -655,12 +655,12 @@ vuint8mf4_t test_sf_vc_v_vvv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.nxv4i8.i32.nxv4i8.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.nxv4i8.i32.nxv4i8.nxv4i8.nxv4i8.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.nxv4i8.i64.nxv4i8.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.nxv4i8.i64.nxv4i8.nxv4i8.nxv4i8.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_sf_vc_v_vvv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
@@ -669,12 +669,12 @@ vuint8mf2_t test_sf_vc_v_vvv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.nxv8i8.i32.nxv8i8.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.nxv8i8.i32.nxv8i8.nxv8i8.nxv8i8.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.nxv8i8.i64.nxv8i8.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.nxv8i8.i64.nxv8i8.nxv8i8.nxv8i8.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_sf_vc_v_vvv_u8m1(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
@@ -683,12 +683,12 @@ vuint8m1_t test_sf_vc_v_vvv_u8m1(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1,
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.nxv16i8.i32.nxv16i8.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.nxv16i8.i32.nxv16i8.nxv16i8.nxv16i8.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.nxv16i8.i64.nxv16i8.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.nxv16i8.i64.nxv16i8.nxv16i8.nxv16i8.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_sf_vc_v_vvv_u8m2(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
@@ -697,12 +697,12 @@ vuint8m2_t test_sf_vc_v_vvv_u8m2(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1,
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.nxv32i8.i32.nxv32i8.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.nxv32i8.i32.nxv32i8.nxv32i8.nxv32i8.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.nxv32i8.i64.nxv32i8.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.nxv32i8.i64.nxv32i8.nxv32i8.nxv32i8.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_sf_vc_v_vvv_u8m4(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
@@ -711,12 +711,12 @@ vuint8m4_t test_sf_vc_v_vvv_u8m4(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1,
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.nxv64i8.i32.nxv64i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.nxv64i8.i32.nxv64i8.nxv64i8.nxv64i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.nxv64i8.i64.nxv64i8.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.nxv64i8.i64.nxv64i8.nxv64i8.nxv64i8.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_sf_vc_v_vvv_u8m8(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
@@ -725,12 +725,12 @@ vuint8m8_t test_sf_vc_v_vvv_u8m8(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1,
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.nxv1i16.i32.nxv1i16.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.nxv1i16.i32.nxv1i16.nxv1i16.nxv1i16.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.nxv1i16.i64.nxv1i16.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.nxv1i16.i64.nxv1i16.nxv1i16.nxv1i16.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_sf_vc_v_vvv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
@@ -739,12 +739,12 @@ vuint16mf4_t test_sf_vc_v_vvv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16m
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.nxv2i16.i32.nxv2i16.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.nxv2i16.i32.nxv2i16.nxv2i16.nxv2i16.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.nxv2i16.i64.nxv2i16.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.nxv2i16.i64.nxv2i16.nxv2i16.nxv2i16.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_sf_vc_v_vvv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
@@ -753,12 +753,12 @@ vuint16mf2_t test_sf_vc_v_vvv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16m
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.nxv4i16.i32.nxv4i16.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.nxv4i16.i32.nxv4i16.nxv4i16.nxv4i16.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.nxv4i16.i64.nxv4i16.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.nxv4i16.i64.nxv4i16.nxv4i16.nxv4i16.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_sf_vc_v_vvv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
@@ -767,12 +767,12 @@ vuint16m1_t test_sf_vc_v_vvv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.nxv8i16.i32.nxv8i16.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.nxv8i16.i32.nxv8i16.nxv8i16.nxv8i16.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.nxv8i16.i64.nxv8i16.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.nxv8i16.i64.nxv8i16.nxv8i16.nxv8i16.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_sf_vc_v_vvv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
@@ -781,12 +781,12 @@ vuint16m2_t test_sf_vc_v_vvv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.nxv16i16.i32.nxv16i16.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.nxv16i16.i32.nxv16i16.nxv16i16.nxv16i16.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.nxv16i16.i64.nxv16i16.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.nxv16i16.i64.nxv16i16.nxv16i16.nxv16i16.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_sf_vc_v_vvv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
@@ -795,12 +795,12 @@ vuint16m4_t test_sf_vc_v_vvv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u16m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.nxv32i16.i32.nxv32i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.nxv32i16.i32.nxv32i16.nxv32i16.nxv32i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.nxv32i16.i64.nxv32i16.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.nxv32i16.i64.nxv32i16.nxv32i16.nxv32i16.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_sf_vc_v_vvv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
@@ -809,12 +809,12 @@ vuint16m8_t test_sf_vc_v_vvv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.nxv1i32.i32.nxv1i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.nxv1i32.i32.nxv1i32.nxv1i32.nxv1i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.nxv1i32.i64.nxv1i32.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.nxv1i32.i64.nxv1i32.nxv1i32.nxv1i32.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_sf_vc_v_vvv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
@@ -823,12 +823,12 @@ vuint32mf2_t test_sf_vc_v_vvv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32m
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.nxv2i32.i32.nxv2i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.nxv2i32.i32.nxv2i32.nxv2i32.nxv2i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.nxv2i32.i64.nxv2i32.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.nxv2i32.i64.nxv2i32.nxv2i32.nxv2i32.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_sf_vc_v_vvv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
@@ -837,12 +837,12 @@ vuint32m1_t test_sf_vc_v_vvv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.nxv4i32.i32.nxv4i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.nxv4i32.i32.nxv4i32.nxv4i32.nxv4i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.nxv4i32.i64.nxv4i32.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.nxv4i32.i64.nxv4i32.nxv4i32.nxv4i32.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_sf_vc_v_vvv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
@@ -851,12 +851,12 @@ vuint32m2_t test_sf_vc_v_vvv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.nxv8i32.i32.nxv8i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.nxv8i32.i32.nxv8i32.nxv8i32.nxv8i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.nxv8i32.i64.nxv8i32.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.nxv8i32.i64.nxv8i32.nxv8i32.nxv8i32.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_sf_vc_v_vvv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
@@ -865,12 +865,12 @@ vuint32m4_t test_sf_vc_v_vvv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u32m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.nxv16i32.i32.nxv16i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.nxv16i32.i32.nxv16i32.nxv16i32.nxv16i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.nxv16i32.i64.nxv16i32.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.nxv16i32.i64.nxv16i32.nxv16i32.nxv16i32.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_sf_vc_v_vvv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
@@ -879,12 +879,12 @@ vuint32m8_t test_sf_vc_v_vvv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u64m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.nxv1i64.i32.nxv1i64.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.nxv1i64.i32.nxv1i64.nxv1i64.nxv1i64.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.nxv1i64.i64.nxv1i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.nxv1i64.i64.nxv1i64.nxv1i64.nxv1i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_vvv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
@@ -893,12 +893,12 @@ vuint64m1_t test_sf_vc_v_vvv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u64m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.nxv2i64.i32.nxv2i64.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.nxv2i64.i32.nxv2i64.nxv2i64.nxv2i64.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.nxv2i64.i64.nxv2i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.nxv2i64.i64.nxv2i64.nxv2i64.nxv2i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_vvv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
@@ -907,12 +907,12 @@ vuint64m2_t test_sf_vc_v_vvv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u64m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.nxv4i64.i32.nxv4i64.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.nxv4i64.i32.nxv4i64.nxv4i64.nxv4i64.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.nxv4i64.i64.nxv4i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.nxv4i64.i64.nxv4i64.nxv4i64.nxv4i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_vvv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
@@ -921,12 +921,12 @@ vuint64m4_t test_sf_vc_v_vvv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u64m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.i32.nxv8i64.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.i32.nxv8i64.nxv8i64.nxv8i64.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.i64.nxv8i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.i64.nxv8i64.nxv8i64.nxv8i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_vvv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
@@ -935,12 +935,12 @@ vuint64m8_t test_sf_vc_v_vvv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8mf8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i8.i8.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i8.nxv1i8.i8.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv1i8.i8.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv1i8.nxv1i8.i8.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_xvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
@@ -949,12 +949,12 @@ void test_sf_vc_xvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_
 
 // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i8.i8.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i8.nxv2i8.i8.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv2i8.i8.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv2i8.nxv2i8.i8.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_xvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
@@ -963,12 +963,12 @@ void test_sf_vc_xvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_
 
 // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i8.i8.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i8.nxv4i8.i8.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv4i8.i8.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv4i8.nxv4i8.i8.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_xvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
@@ -977,12 +977,12 @@ void test_sf_vc_xvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_
 
 // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i8.i8.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i8.nxv8i8.i8.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv8i8.i8.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv8i8.nxv8i8.i8.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_xvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
@@ -991,12 +991,12 @@ void test_sf_vc_xvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t v
 
 // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i8.i8.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i8.nxv16i8.i8.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv16i8.i8.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv16i8.nxv16i8.i8.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_xvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
@@ -1005,12 +1005,12 @@ void test_sf_vc_xvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t v
 
 // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv32i8.i8.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv32i8.nxv32i8.i8.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv32i8.i8.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv32i8.nxv32i8.i8.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_xvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
@@ -1019,12 +1019,12 @@ void test_sf_vc_xvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t v
 
 // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv64i8.i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv64i8.nxv64i8.i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv64i8.i8.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv64i8.nxv64i8.i8.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_xvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
@@ -1033,12 +1033,12 @@ void test_sf_vc_xvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t v
 
 // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i16.i16.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i16.nxv1i16.i16.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv1i16.i16.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv1i16.nxv1i16.i16.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_xvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
@@ -1047,12 +1047,12 @@ void test_sf_vc_xvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i16.i16.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i16.nxv2i16.i16.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv2i16.i16.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv2i16.nxv2i16.i16.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_xvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
@@ -1061,12 +1061,12 @@ void test_sf_vc_xvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i16.i16.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i16.nxv4i16.i16.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv4i16.i16.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv4i16.nxv4i16.i16.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_xvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
@@ -1075,12 +1075,12 @@ void test_sf_vc_xvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size
 
 // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i16.i16.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i16.nxv8i16.i16.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv8i16.i16.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv8i16.nxv8i16.i16.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_xvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
@@ -1089,12 +1089,12 @@ void test_sf_vc_xvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size
 
 // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i16.i16.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i16.nxv16i16.i16.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv16i16.i16.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv16i16.nxv16i16.i16.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_xvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
@@ -1103,12 +1103,12 @@ void test_sf_vc_xvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size
 
 // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u16m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv32i16.i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv32i16.nxv32i16.i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv32i16.i16.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv32i16.nxv32i16.i16.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_xvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
@@ -1117,12 +1117,12 @@ void test_sf_vc_xvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size
 
 // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i32.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i32.nxv1i32.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv1i32.i32.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv1i32.nxv1i32.i32.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_xvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
@@ -1131,12 +1131,12 @@ void test_sf_vc_xvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i32.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i32.nxv2i32.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv2i32.i32.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv2i32.nxv2i32.i32.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_xvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
@@ -1145,12 +1145,12 @@ void test_sf_vc_xvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size
 
 // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i32.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv4i32.i32.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv4i32.nxv4i32.i32.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_xvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
@@ -1159,12 +1159,12 @@ void test_sf_vc_xvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size
 
 // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i32.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i32.nxv8i32.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv8i32.i32.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv8i32.nxv8i32.i32.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_xvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
@@ -1173,12 +1173,12 @@ void test_sf_vc_xvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size
 
 // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u32m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i32.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i32.nxv16i32.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv16i32.i32.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv16i32.nxv16i32.i32.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_xvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
@@ -1187,12 +1187,12 @@ void test_sf_vc_xvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8mf8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.i32.i8.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.i32.nxv1i8.nxv1i8.i8.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.i64.i8.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.i64.nxv1i8.nxv1i8.i8.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_sf_vc_v_xvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
@@ -1201,12 +1201,12 @@ vuint8mf8_t test_sf_vc_v_xvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t r
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.i32.i8.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.i32.nxv2i8.nxv2i8.i8.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.i64.i8.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.i64.nxv2i8.nxv2i8.i8.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_sf_vc_v_xvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
@@ -1215,12 +1215,12 @@ vuint8mf4_t test_sf_vc_v_xvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t r
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.i32.i8.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.i32.nxv4i8.nxv4i8.i8.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.i64.i8.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.i64.nxv4i8.nxv4i8.i8.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_sf_vc_v_xvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
@@ -1229,12 +1229,12 @@ vuint8mf2_t test_sf_vc_v_xvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t r
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.i32.i8.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.i32.nxv8i8.nxv8i8.i8.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.i64.i8.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.i64.nxv8i8.nxv8i8.i8.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_sf_vc_v_xvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
@@ -1243,12 +1243,12 @@ vuint8m1_t test_sf_vc_v_xvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1,
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.i32.i8.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.i32.nxv16i8.nxv16i8.i8.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.i64.i8.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.i64.nxv16i8.nxv16i8.i8.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_sf_vc_v_xvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
@@ -1257,12 +1257,12 @@ vuint8m2_t test_sf_vc_v_xvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1,
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.i32.i8.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.i32.nxv32i8.nxv32i8.i8.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.i64.i8.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.i64.nxv32i8.nxv32i8.i8.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_sf_vc_v_xvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
@@ -1271,12 +1271,12 @@ vuint8m4_t test_sf_vc_v_xvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1,
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.i32.i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.i32.nxv64i8.nxv64i8.i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.i64.i8.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.i64.nxv64i8.nxv64i8.i8.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_sf_vc_v_xvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
@@ -1285,12 +1285,12 @@ vuint8m8_t test_sf_vc_v_xvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1,
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.i32.i16.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.i32.nxv1i16.nxv1i16.i16.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.i64.i16.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.i64.nxv1i16.nxv1i16.i16.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_sf_vc_v_xvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
@@ -1299,12 +1299,12 @@ vuint16mf4_t test_sf_vc_v_xvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, uint1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.i32.i16.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.i32.nxv2i16.nxv2i16.i16.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.i64.i16.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.i64.nxv2i16.nxv2i16.i16.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_sf_vc_v_xvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
@@ -1313,12 +1313,12 @@ vuint16mf2_t test_sf_vc_v_xvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, uint1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.i32.i16.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.i32.nxv4i16.nxv4i16.i16.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.i64.i16.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.i64.nxv4i16.nxv4i16.i16.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_sf_vc_v_xvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
@@ -1327,12 +1327,12 @@ vuint16m1_t test_sf_vc_v_xvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, uint16_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.i32.i16.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.i32.nxv8i16.nxv8i16.i16.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.i64.i16.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.i64.nxv8i16.nxv8i16.i16.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_sf_vc_v_xvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
@@ -1341,12 +1341,12 @@ vuint16m2_t test_sf_vc_v_xvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, uint16_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.i32.i16.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.i32.nxv16i16.nxv16i16.i16.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.i64.i16.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.i64.nxv16i16.nxv16i16.i16.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_sf_vc_v_xvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
@@ -1355,12 +1355,12 @@ vuint16m4_t test_sf_vc_v_xvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, uint16_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u16m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.i32.i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.i32.nxv32i16.nxv32i16.i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.i64.i16.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.i64.nxv32i16.nxv32i16.i16.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_sf_vc_v_xvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
@@ -1369,12 +1369,12 @@ vuint16m8_t test_sf_vc_v_xvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, uint16_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.i32.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.i32.nxv1i32.nxv1i32.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.i64.i32.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.i64.nxv1i32.nxv1i32.i32.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_sf_vc_v_xvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
@@ -1383,12 +1383,12 @@ vuint32mf2_t test_sf_vc_v_xvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, uint3
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.i32.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.i32.nxv2i32.nxv2i32.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.i64.i32.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.i64.nxv2i32.nxv2i32.i32.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_sf_vc_v_xvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
@@ -1397,12 +1397,12 @@ vuint32m1_t test_sf_vc_v_xvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, uint32_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.i32.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.i32.nxv4i32.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.i64.i32.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.i64.nxv4i32.nxv4i32.i32.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_sf_vc_v_xvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
@@ -1411,12 +1411,12 @@ vuint32m2_t test_sf_vc_v_xvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, uint32_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.i32.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.i32.nxv8i32.nxv8i32.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.i64.i32.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.i64.nxv8i32.nxv8i32.i32.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_sf_vc_v_xvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
@@ -1425,12 +1425,12 @@ vuint32m4_t test_sf_vc_v_xvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, uint32_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u32m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.i32.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.i32.nxv16i32.nxv16i32.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.i64.i32.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.i64.nxv16i32.nxv16i32.i32.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_sf_vc_v_xvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
@@ -1439,12 +1439,12 @@ vuint32m8_t test_sf_vc_v_xvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, uint32_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8mf8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.nxv1i8.i32.i8.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.nxv1i8.i32.nxv1i8.nxv1i8.i8.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.nxv1i8.i64.i8.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.nxv1i8.i64.nxv1i8.nxv1i8.i8.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_sf_vc_v_xvv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
@@ -1453,12 +1453,12 @@ vuint8mf8_t test_sf_vc_v_xvv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1,
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.nxv2i8.i32.i8.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.nxv2i8.i32.nxv2i8.nxv2i8.i8.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.nxv2i8.i64.i8.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.nxv2i8.i64.nxv2i8.nxv2i8.i8.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_sf_vc_v_xvv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
@@ -1467,12 +1467,12 @@ vuint8mf4_t test_sf_vc_v_xvv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1,
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.nxv4i8.i32.i8.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.nxv4i8.i32.nxv4i8.nxv4i8.i8.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.nxv4i8.i64.i8.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.nxv4i8.i64.nxv4i8.nxv4i8.i8.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_sf_vc_v_xvv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
@@ -1481,12 +1481,12 @@ vuint8mf2_t test_sf_vc_v_xvv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1,
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.nxv8i8.i32.i8.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.nxv8i8.i32.nxv8i8.nxv8i8.i8.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.nxv8i8.i64.i8.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.nxv8i8.i64.nxv8i8.nxv8i8.i8.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_sf_vc_v_xvv_u8m1(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
@@ -1495,12 +1495,12 @@ vuint8m1_t test_sf_vc_v_xvv_u8m1(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, siz
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.nxv16i8.i32.i8.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.nxv16i8.i32.nxv16i8.nxv16i8.i8.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.nxv16i8.i64.i8.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.nxv16i8.i64.nxv16i8.nxv16i8.i8.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_sf_vc_v_xvv_u8m2(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
@@ -1509,12 +1509,12 @@ vuint8m2_t test_sf_vc_v_xvv_u8m2(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, siz
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.nxv32i8.i32.i8.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.nxv32i8.i32.nxv32i8.nxv32i8.i8.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.nxv32i8.i64.i8.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.nxv32i8.i64.nxv32i8.nxv32i8.i8.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_sf_vc_v_xvv_u8m4(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
@@ -1523,12 +1523,12 @@ vuint8m4_t test_sf_vc_v_xvv_u8m4(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, siz
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.nxv64i8.i32.i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.nxv64i8.i32.nxv64i8.nxv64i8.i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.nxv64i8.i64.i8.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.nxv64i8.i64.nxv64i8.nxv64i8.i8.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_sf_vc_v_xvv_u8m8(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
@@ -1537,12 +1537,12 @@ vuint8m8_t test_sf_vc_v_xvv_u8m8(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, siz
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.nxv1i16.i32.i16.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.nxv1i16.i32.nxv1i16.nxv1i16.i16.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.nxv1i16.i64.i16.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.nxv1i16.i64.nxv1i16.nxv1i16.i16.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_sf_vc_v_xvv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
@@ -1551,12 +1551,12 @@ vuint16mf4_t test_sf_vc_v_xvv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.nxv2i16.i32.i16.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.nxv2i16.i32.nxv2i16.nxv2i16.i16.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.nxv2i16.i64.i16.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.nxv2i16.i64.nxv2i16.nxv2i16.i16.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_sf_vc_v_xvv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
@@ -1565,12 +1565,12 @@ vuint16mf2_t test_sf_vc_v_xvv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.nxv4i16.i32.i16.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.nxv4i16.i32.nxv4i16.nxv4i16.i16.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.nxv4i16.i64.i16.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.nxv4i16.i64.nxv4i16.nxv4i16.i16.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_sf_vc_v_xvv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
@@ -1579,12 +1579,12 @@ vuint16m1_t test_sf_vc_v_xvv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.nxv8i16.i32.i16.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.nxv8i16.i32.nxv8i16.nxv8i16.i16.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.nxv8i16.i64.i16.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.nxv8i16.i64.nxv8i16.nxv8i16.i16.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_sf_vc_v_xvv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
@@ -1593,12 +1593,12 @@ vuint16m2_t test_sf_vc_v_xvv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.nxv16i16.i32.i16.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.nxv16i16.i32.nxv16i16.nxv16i16.i16.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.nxv16i16.i64.i16.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.nxv16i16.i64.nxv16i16.nxv16i16.i16.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_sf_vc_v_xvv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
@@ -1607,12 +1607,12 @@ vuint16m4_t test_sf_vc_v_xvv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u16m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.nxv32i16.i32.i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.nxv32i16.i32.nxv32i16.nxv32i16.i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.nxv32i16.i64.i16.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.nxv32i16.i64.nxv32i16.nxv32i16.i16.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_sf_vc_v_xvv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
@@ -1621,12 +1621,12 @@ vuint16m8_t test_sf_vc_v_xvv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.nxv1i32.i32.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.nxv1i32.i32.nxv1i32.nxv1i32.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.nxv1i32.i64.i32.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.nxv1i32.i64.nxv1i32.nxv1i32.i32.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_sf_vc_v_xvv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
@@ -1635,12 +1635,12 @@ vuint32mf2_t test_sf_vc_v_xvv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.nxv2i32.i32.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.nxv2i32.i32.nxv2i32.nxv2i32.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.nxv2i32.i64.i32.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.nxv2i32.i64.nxv2i32.nxv2i32.i32.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_sf_vc_v_xvv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
@@ -1649,12 +1649,12 @@ vuint32m1_t test_sf_vc_v_xvv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.nxv4i32.i32.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.nxv4i32.i32.nxv4i32.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.nxv4i32.i64.i32.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.nxv4i32.i64.nxv4i32.nxv4i32.i32.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_sf_vc_v_xvv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
@@ -1663,12 +1663,12 @@ vuint32m2_t test_sf_vc_v_xvv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.nxv8i32.i32.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.nxv8i32.i32.nxv8i32.nxv8i32.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.nxv8i32.i64.i32.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.nxv8i32.i64.nxv8i32.nxv8i32.i32.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_sf_vc_v_xvv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
@@ -1677,12 +1677,12 @@ vuint32m4_t test_sf_vc_v_xvv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u32m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.nxv16i32.i32.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.nxv16i32.i32.nxv16i32.nxv16i32.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.nxv16i32.i64.i32.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.nxv16i32.i64.nxv16i32.nxv16i32.i32.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_sf_vc_v_xvv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
@@ -1691,12 +1691,12 @@ vuint32m8_t test_sf_vc_v_xvv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8mf8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i8.i32.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i8.nxv1i8.i32.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv1i8.i64.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv1i8.nxv1i8.i64.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_ivv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) {
@@ -1705,12 +1705,12 @@ void test_sf_vc_ivv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i8.i32.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i8.nxv2i8.i32.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv2i8.i64.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv2i8.nxv2i8.i64.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_ivv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) {
@@ -1719,12 +1719,12 @@ void test_sf_vc_ivv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i8.i32.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i8.nxv4i8.i32.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv4i8.i64.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv4i8.nxv4i8.i64.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_ivv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) {
@@ -1733,12 +1733,12 @@ void test_sf_vc_ivv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i8.i32.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i8.nxv8i8.i32.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv8i8.i64.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv8i8.nxv8i8.i64.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_ivv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
@@ -1747,12 +1747,12 @@ void test_sf_vc_ivv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv16i8.i32.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv16i8.nxv16i8.i32.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv16i8.i64.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv16i8.nxv16i8.i64.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_ivv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) {
@@ -1761,12 +1761,12 @@ void test_sf_vc_ivv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv32i8.i32.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv32i8.nxv32i8.i32.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv32i8.i64.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv32i8.nxv32i8.i64.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_ivv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) {
@@ -1775,12 +1775,12 @@ void test_sf_vc_ivv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv64i8.i32.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv64i8.nxv64i8.i32.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv64i8.i64.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv64i8.nxv64i8.i64.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_ivv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
@@ -1789,12 +1789,12 @@ void test_sf_vc_ivv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i16.i32.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i16.nxv1i16.i32.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv1i16.i64.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv1i16.nxv1i16.i64.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_ivv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
@@ -1803,12 +1803,12 @@ void test_sf_vc_ivv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i16.i32.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i16.nxv2i16.i32.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv2i16.i64.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv2i16.nxv2i16.i64.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_ivv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
@@ -1817,12 +1817,12 @@ void test_sf_vc_ivv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i16.i32.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i16.nxv4i16.i32.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv4i16.i64.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv4i16.nxv4i16.i64.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_ivv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) {
@@ -1831,12 +1831,12 @@ void test_sf_vc_ivv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i16.i32.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i16.nxv8i16.i32.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv8i16.i64.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv8i16.nxv8i16.i64.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_ivv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) {
@@ -1845,12 +1845,12 @@ void test_sf_vc_ivv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv16i16.i32.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv16i16.nxv16i16.i32.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv16i16.i64.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv16i16.nxv16i16.i64.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_ivv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) {
@@ -1859,12 +1859,12 @@ void test_sf_vc_ivv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u16m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv32i16.i32.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv32i16.nxv32i16.i32.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv32i16.i64.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv32i16.nxv32i16.i64.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_ivv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) {
@@ -1873,12 +1873,12 @@ void test_sf_vc_ivv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i32.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i32.nxv1i32.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv1i32.i64.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv1i32.nxv1i32.i64.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_ivv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
@@ -1887,12 +1887,12 @@ void test_sf_vc_ivv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i32.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i32.nxv2i32.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv2i32.i64.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv2i32.nxv2i32.i64.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_ivv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
@@ -1901,12 +1901,12 @@ void test_sf_vc_ivv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i32.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv4i32.i64.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv4i32.nxv4i32.i64.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_ivv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
@@ -1915,12 +1915,12 @@ void test_sf_vc_ivv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i32.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i32.nxv8i32.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv8i32.i64.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv8i32.nxv8i32.i64.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_ivv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
@@ -1929,12 +1929,12 @@ void test_sf_vc_ivv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u32m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv16i32.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv16i32.nxv16i32.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv16i32.i64.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv16i32.nxv16i32.i64.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_ivv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
@@ -1943,12 +1943,12 @@ void test_sf_vc_ivv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u64m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i64.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i64.nxv1i64.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv1i64.i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv1i64.nxv1i64.i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_ivv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) {
@@ -1957,12 +1957,12 @@ void test_sf_vc_ivv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u64m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i64.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i64.nxv2i64.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv2i64.i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv2i64.nxv2i64.i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_ivv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) {
@@ -1971,12 +1971,12 @@ void test_sf_vc_ivv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u64m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i64.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i64.nxv4i64.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv4i64.i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv4i64.nxv4i64.i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_ivv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) {
@@ -1985,12 +1985,12 @@ void test_sf_vc_ivv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u64m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i64.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i64.nxv8i64.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv8i64.i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.ivv.se.i64.nxv8i64.nxv8i64.i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_ivv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) {
@@ -1999,12 +1999,12 @@ void test_sf_vc_ivv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8mf8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.i32.i32.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.i32.nxv1i8.nxv1i8.i32.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.i64.i64.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.i64.nxv1i8.nxv1i8.i64.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_sf_vc_v_ivv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) {
@@ -2013,12 +2013,12 @@ vuint8mf8_t test_sf_vc_v_ivv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.i32.i32.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.i32.nxv2i8.nxv2i8.i32.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.i64.i64.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.i64.nxv2i8.nxv2i8.i64.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_sf_vc_v_ivv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) {
@@ -2027,12 +2027,12 @@ vuint8mf4_t test_sf_vc_v_ivv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.i32.i32.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.i32.nxv4i8.nxv4i8.i32.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.i64.i64.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.i64.nxv4i8.nxv4i8.i64.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_sf_vc_v_ivv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) {
@@ -2041,12 +2041,12 @@ vuint8mf2_t test_sf_vc_v_ivv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.i32.i32.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.i32.nxv8i8.nxv8i8.i32.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.i64.i64.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.i64.nxv8i8.nxv8i8.i64.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_sf_vc_v_ivv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
@@ -2055,12 +2055,12 @@ vuint8m1_t test_sf_vc_v_ivv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.i32.i32.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.i32.nxv16i8.nxv16i8.i32.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.i64.i64.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.i64.nxv16i8.nxv16i8.i64.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_sf_vc_v_ivv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) {
@@ -2069,12 +2069,12 @@ vuint8m2_t test_sf_vc_v_ivv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.i32.i32.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.i32.nxv32i8.nxv32i8.i32.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.i64.i64.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.i64.nxv32i8.nxv32i8.i64.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_sf_vc_v_ivv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) {
@@ -2083,12 +2083,12 @@ vuint8m4_t test_sf_vc_v_ivv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.i32.i32.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.i32.nxv64i8.nxv64i8.i32.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.i64.i64.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.i64.nxv64i8.nxv64i8.i64.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_sf_vc_v_ivv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
@@ -2097,12 +2097,12 @@ vuint8m8_t test_sf_vc_v_ivv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.i32.i32.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.i32.nxv1i16.nxv1i16.i32.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.i64.i64.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.i64.nxv1i16.nxv1i16.i64.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_sf_vc_v_ivv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
@@ -2111,12 +2111,12 @@ vuint16mf4_t test_sf_vc_v_ivv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, size_
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.i32.i32.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.i32.nxv2i16.nxv2i16.i32.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.i64.i64.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.i64.nxv2i16.nxv2i16.i64.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_sf_vc_v_ivv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
@@ -2125,12 +2125,12 @@ vuint16mf2_t test_sf_vc_v_ivv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, size_
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.i32.i32.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.i32.nxv4i16.nxv4i16.i32.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.i64.i64.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.i64.nxv4i16.nxv4i16.i64.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_sf_vc_v_ivv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) {
@@ -2139,12 +2139,12 @@ vuint16m1_t test_sf_vc_v_ivv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.i32.i32.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.i32.nxv8i16.nxv8i16.i32.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.i64.i64.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.i64.nxv8i16.nxv8i16.i64.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_sf_vc_v_ivv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) {
@@ -2153,12 +2153,12 @@ vuint16m2_t test_sf_vc_v_ivv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.i32.i32.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.i32.nxv16i16.nxv16i16.i32.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.i64.i64.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.i64.nxv16i16.nxv16i16.i64.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_sf_vc_v_ivv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) {
@@ -2167,12 +2167,12 @@ vuint16m4_t test_sf_vc_v_ivv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u16m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.i32.i32.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.i32.nxv32i16.nxv32i16.i32.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.i64.i64.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.i64.nxv32i16.nxv32i16.i64.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_sf_vc_v_ivv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) {
@@ -2181,12 +2181,12 @@ vuint16m8_t test_sf_vc_v_ivv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.i32.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.i32.nxv1i32.nxv1i32.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.i64.i64.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.i64.nxv1i32.nxv1i32.i64.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_sf_vc_v_ivv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
@@ -2195,12 +2195,12 @@ vuint32mf2_t test_sf_vc_v_ivv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.i32.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.i32.nxv2i32.nxv2i32.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.i64.i64.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.i64.nxv2i32.nxv2i32.i64.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_sf_vc_v_ivv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
@@ -2209,12 +2209,12 @@ vuint32m1_t test_sf_vc_v_ivv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.i32.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.i32.nxv4i32.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.i64.i64.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.i64.nxv4i32.nxv4i32.i64.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_sf_vc_v_ivv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
@@ -2223,12 +2223,12 @@ vuint32m2_t test_sf_vc_v_ivv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.i32.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.i32.nxv8i32.nxv8i32.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.i64.i64.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.i64.nxv8i32.nxv8i32.i64.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_sf_vc_v_ivv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
@@ -2237,12 +2237,12 @@ vuint32m4_t test_sf_vc_v_ivv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u32m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.i32.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.i32.nxv16i32.nxv16i32.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.i64.i64.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.i64.nxv16i32.nxv16i32.i64.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_sf_vc_v_ivv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
@@ -2251,12 +2251,12 @@ vuint32m8_t test_sf_vc_v_ivv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u64m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.i32.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.i32.nxv1i64.nxv1i64.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.i64.i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.i64.nxv1i64.nxv1i64.i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_ivv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) {
@@ -2265,12 +2265,12 @@ vuint64m1_t test_sf_vc_v_ivv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u64m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.i32.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.i32.nxv2i64.nxv2i64.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.i64.i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.i64.nxv2i64.nxv2i64.i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_ivv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) {
@@ -2279,12 +2279,12 @@ vuint64m2_t test_sf_vc_v_ivv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u64m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.i32.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.i32.nxv4i64.nxv4i64.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.i64.i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.i64.nxv4i64.nxv4i64.i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_ivv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) {
@@ -2293,12 +2293,12 @@ vuint64m4_t test_sf_vc_v_ivv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u64m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.i32.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.i32.nxv8i64.nxv8i64.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.i64.i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.i64.nxv8i64.nxv8i64.i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_ivv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) {
@@ -2307,12 +2307,12 @@ vuint64m8_t test_sf_vc_v_ivv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8mf8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.nxv1i8.i32.i32.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.nxv1i8.i32.nxv1i8.nxv1i8.i32.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.nxv1i8.i64.i64.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.nxv1i8.i64.nxv1i8.nxv1i8.i64.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_sf_vc_v_ivv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) {
@@ -2321,12 +2321,12 @@ vuint8mf8_t test_sf_vc_v_ivv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.nxv2i8.i32.i32.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.nxv2i8.i32.nxv2i8.nxv2i8.i32.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.nxv2i8.i64.i64.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.nxv2i8.i64.nxv2i8.nxv2i8.i64.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_sf_vc_v_ivv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) {
@@ -2335,12 +2335,12 @@ vuint8mf4_t test_sf_vc_v_ivv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.nxv4i8.i32.i32.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.nxv4i8.i32.nxv4i8.nxv4i8.i32.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.nxv4i8.i64.i64.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.nxv4i8.i64.nxv4i8.nxv4i8.i64.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_sf_vc_v_ivv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) {
@@ -2349,12 +2349,12 @@ vuint8mf2_t test_sf_vc_v_ivv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.nxv8i8.i32.i32.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.nxv8i8.i32.nxv8i8.nxv8i8.i32.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.nxv8i8.i64.i64.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.nxv8i8.i64.nxv8i8.nxv8i8.i64.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_sf_vc_v_ivv_u8m1(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
@@ -2363,12 +2363,12 @@ vuint8m1_t test_sf_vc_v_ivv_u8m1(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.nxv16i8.i32.i32.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.nxv16i8.i32.nxv16i8.nxv16i8.i32.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.nxv16i8.i64.i64.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.nxv16i8.i64.nxv16i8.nxv16i8.i64.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_sf_vc_v_ivv_u8m2(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) {
@@ -2377,12 +2377,12 @@ vuint8m2_t test_sf_vc_v_ivv_u8m2(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.nxv32i8.i32.i32.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.nxv32i8.i32.nxv32i8.nxv32i8.i32.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.nxv32i8.i64.i64.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.nxv32i8.i64.nxv32i8.nxv32i8.i64.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_sf_vc_v_ivv_u8m4(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) {
@@ -2391,12 +2391,12 @@ vuint8m4_t test_sf_vc_v_ivv_u8m4(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.nxv64i8.i32.i32.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.nxv64i8.i32.nxv64i8.nxv64i8.i32.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.nxv64i8.i64.i64.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.nxv64i8.i64.nxv64i8.nxv64i8.i64.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_sf_vc_v_ivv_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
@@ -2405,12 +2405,12 @@ vuint8m8_t test_sf_vc_v_ivv_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.nxv1i16.i32.i32.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.nxv1i16.i32.nxv1i16.nxv1i16.i32.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.nxv1i16.i64.i64.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.nxv1i16.i64.nxv1i16.nxv1i16.i64.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_sf_vc_v_ivv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
@@ -2419,12 +2419,12 @@ vuint16mf4_t test_sf_vc_v_ivv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, size_t v
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.nxv2i16.i32.i32.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.nxv2i16.i32.nxv2i16.nxv2i16.i32.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.nxv2i16.i64.i64.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.nxv2i16.i64.nxv2i16.nxv2i16.i64.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_sf_vc_v_ivv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
@@ -2433,12 +2433,12 @@ vuint16mf2_t test_sf_vc_v_ivv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, size_t v
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.nxv4i16.i32.i32.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.nxv4i16.i32.nxv4i16.nxv4i16.i32.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.nxv4i16.i64.i64.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.nxv4i16.i64.nxv4i16.nxv4i16.i64.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_sf_vc_v_ivv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) {
@@ -2447,12 +2447,12 @@ vuint16m1_t test_sf_vc_v_ivv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.nxv8i16.i32.i32.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.nxv8i16.i32.nxv8i16.nxv8i16.i32.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.nxv8i16.i64.i64.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.nxv8i16.i64.nxv8i16.nxv8i16.i64.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_sf_vc_v_ivv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) {
@@ -2461,12 +2461,12 @@ vuint16m2_t test_sf_vc_v_ivv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.nxv16i16.i32.i32.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.nxv16i16.i32.nxv16i16.nxv16i16.i32.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.nxv16i16.i64.i64.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.nxv16i16.i64.nxv16i16.nxv16i16.i64.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_sf_vc_v_ivv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) {
@@ -2475,12 +2475,12 @@ vuint16m4_t test_sf_vc_v_ivv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u16m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.nxv32i16.i32.i32.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.nxv32i16.i32.nxv32i16.nxv32i16.i32.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.nxv32i16.i64.i64.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.nxv32i16.i64.nxv32i16.nxv32i16.i64.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_sf_vc_v_ivv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) {
@@ -2489,12 +2489,12 @@ vuint16m8_t test_sf_vc_v_ivv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.nxv1i32.i32.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.nxv1i32.i32.nxv1i32.nxv1i32.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.nxv1i32.i64.i64.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.nxv1i32.i64.nxv1i32.nxv1i32.i64.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_sf_vc_v_ivv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
@@ -2503,12 +2503,12 @@ vuint32mf2_t test_sf_vc_v_ivv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t v
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.nxv2i32.i32.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.nxv2i32.i32.nxv2i32.nxv2i32.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.nxv2i32.i64.i64.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.nxv2i32.i64.nxv2i32.nxv2i32.i64.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_sf_vc_v_ivv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
@@ -2517,12 +2517,12 @@ vuint32m1_t test_sf_vc_v_ivv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.nxv4i32.i32.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.nxv4i32.i32.nxv4i32.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.nxv4i32.i64.i64.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.nxv4i32.i64.nxv4i32.nxv4i32.i64.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_sf_vc_v_ivv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
@@ -2531,12 +2531,12 @@ vuint32m2_t test_sf_vc_v_ivv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.nxv8i32.i32.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.nxv8i32.i32.nxv8i32.nxv8i32.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.nxv8i32.i64.i64.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.nxv8i32.i64.nxv8i32.nxv8i32.i64.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_sf_vc_v_ivv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
@@ -2545,12 +2545,12 @@ vuint32m4_t test_sf_vc_v_ivv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u32m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.nxv16i32.i32.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.nxv16i32.i32.nxv16i32.nxv16i32.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.nxv16i32.i64.i64.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.nxv16i32.i64.nxv16i32.nxv16i32.i64.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_sf_vc_v_ivv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
@@ -2559,12 +2559,12 @@ vuint32m8_t test_sf_vc_v_ivv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u64m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.nxv1i64.i32.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.nxv1i64.i32.nxv1i64.nxv1i64.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.nxv1i64.i64.i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.nxv1i64.i64.nxv1i64.nxv1i64.i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_ivv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) {
@@ -2573,12 +2573,12 @@ vuint64m1_t test_sf_vc_v_ivv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u64m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.nxv2i64.i32.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.nxv2i64.i32.nxv2i64.nxv2i64.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.nxv2i64.i64.i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.nxv2i64.i64.nxv2i64.nxv2i64.i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_ivv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) {
@@ -2587,12 +2587,12 @@ vuint64m2_t test_sf_vc_v_ivv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u64m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.nxv4i64.i32.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.nxv4i64.i32.nxv4i64.nxv4i64.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.nxv4i64.i64.i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.nxv4i64.i64.nxv4i64.nxv4i64.i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_ivv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) {
@@ -2601,12 +2601,12 @@ vuint64m4_t test_sf_vc_v_ivv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u64m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.nxv8i64.i32.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.nxv8i64.i32.nxv8i64.nxv8i64.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.nxv8i64.i64.i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.nxv8i64.i64.nxv8i64.nxv8i64.i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_ivv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) {
@@ -2615,12 +2615,12 @@ vuint64m8_t test_sf_vc_v_ivv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv1i16.f16.i32(i32 1, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv1i16.nxv1i16.f16.i32(i32 1, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv1i16.f16.i64(i64 1, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv1i16.nxv1i16.f16.i64(i64 1, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_fvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, _Float16 fs1, size_t vl) {
@@ -2629,12 +2629,12 @@ void test_sf_vc_fvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, _Float16 fs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv2i16.f16.i32(i32 1, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv2i16.nxv2i16.f16.i32(i32 1, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv2i16.f16.i64(i64 1, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv2i16.nxv2i16.f16.i64(i64 1, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_fvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, _Float16 fs1, size_t vl) {
@@ -2643,12 +2643,12 @@ void test_sf_vc_fvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, _Float16 fs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv4i16.f16.i32(i32 1, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv4i16.nxv4i16.f16.i32(i32 1, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv4i16.f16.i64(i64 1, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv4i16.nxv4i16.f16.i64(i64 1, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_fvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, _Float16 fs1, size_t vl) {
@@ -2657,12 +2657,12 @@ void test_sf_vc_fvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, _Float16 fs1, size
 
 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv8i16.f16.i32(i32 1, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv8i16.nxv8i16.f16.i32(i32 1, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv8i16.f16.i64(i64 1, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv8i16.nxv8i16.f16.i64(i64 1, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_fvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, _Float16 fs1, size_t vl) {
@@ -2671,12 +2671,12 @@ void test_sf_vc_fvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, _Float16 fs1, size
 
 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv16i16.f16.i32(i32 1, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv16i16.nxv16i16.f16.i32(i32 1, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv16i16.f16.i64(i64 1, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv16i16.nxv16i16.f16.i64(i64 1, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_fvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, _Float16 fs1, size_t vl) {
@@ -2685,12 +2685,12 @@ void test_sf_vc_fvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, _Float16 fs1, size
 
 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u16m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv32i16.f16.i32(i32 1, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv32i16.nxv32i16.f16.i32(i32 1, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv32i16.f16.i64(i64 1, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv32i16.nxv32i16.f16.i64(i64 1, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_fvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, _Float16 fs1, size_t vl) {
@@ -2699,12 +2699,12 @@ void test_sf_vc_fvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, _Float16 fs1, size
 
 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv1i32.f32.i32(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv1i32.nxv1i32.f32.i32(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv1i32.f32.i64(i64 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv1i32.nxv1i32.f32.i64(i64 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_fvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, float fs1, size_t vl) {
@@ -2713,12 +2713,12 @@ void test_sf_vc_fvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, float fs1, size
 
 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv2i32.f32.i32(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv2i32.nxv2i32.f32.i32(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv2i32.f32.i64(i64 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv2i32.nxv2i32.f32.i64(i64 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_fvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, float fs1, size_t vl) {
@@ -2727,12 +2727,12 @@ void test_sf_vc_fvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, float fs1, size_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv4i32.f32.i32(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv4i32.nxv4i32.f32.i32(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv4i32.f32.i64(i64 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv4i32.nxv4i32.f32.i64(i64 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_fvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, float fs1, size_t vl) {
@@ -2741,12 +2741,12 @@ void test_sf_vc_fvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, float fs1, size_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv8i32.f32.i32(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv8i32.nxv8i32.f32.i32(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv8i32.f32.i64(i64 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv8i32.nxv8i32.f32.i64(i64 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_fvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, float fs1, size_t vl) {
@@ -2755,12 +2755,12 @@ void test_sf_vc_fvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, float fs1, size_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u32m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv16i32.f32.i32(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv16i32.nxv16i32.f32.i32(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv16i32.f32.i64(i64 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv16i32.nxv16i32.f32.i64(i64 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_fvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, float fs1, size_t vl) {
@@ -2769,12 +2769,12 @@ void test_sf_vc_fvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, float fs1, size_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u64m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv1i64.f64.i32(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv1i64.nxv1i64.f64.i32(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv1i64.f64.i64(i64 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv1i64.nxv1i64.f64.i64(i64 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_fvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, double fs1, size_t vl) {
@@ -2783,12 +2783,12 @@ void test_sf_vc_fvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, double fs1, size_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u64m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv2i64.f64.i32(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv2i64.nxv2i64.f64.i32(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv2i64.f64.i64(i64 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv2i64.nxv2i64.f64.i64(i64 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_fvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, double fs1, size_t vl) {
@@ -2797,12 +2797,12 @@ void test_sf_vc_fvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, double fs1, size_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u64m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv4i64.f64.i32(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv4i64.nxv4i64.f64.i32(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv4i64.f64.i64(i64 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv4i64.nxv4i64.f64.i64(i64 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_fvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, double fs1, size_t vl) {
@@ -2811,12 +2811,12 @@ void test_sf_vc_fvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, double fs1, size_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u64m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv8i64.f64.i32(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i32.nxv8i64.nxv8i64.f64.i32(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv8i64.f64.i64(i64 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.fvv.se.i64.nxv8i64.nxv8i64.f64.i64(i64 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_fvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, double fs1, size_t vl) {
@@ -2825,12 +2825,12 @@ void test_sf_vc_fvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, double fs1, size_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.i32.f16.i32(i32 1, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.i32.nxv1i16.nxv1i16.f16.i32(i32 1, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.i64.f16.i64(i64 1, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.i64.nxv1i16.nxv1i16.f16.i64(i64 1, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_sf_vc_v_fvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, _Float16 fs1, size_t vl) {
@@ -2839,12 +2839,12 @@ vuint16mf4_t test_sf_vc_v_fvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, _Floa
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.i32.f16.i32(i32 1, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.i32.nxv2i16.nxv2i16.f16.i32(i32 1, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.i64.f16.i64(i64 1, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.i64.nxv2i16.nxv2i16.f16.i64(i64 1, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_sf_vc_v_fvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, _Float16 fs1, size_t vl) {
@@ -2853,12 +2853,12 @@ vuint16mf2_t test_sf_vc_v_fvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, _Floa
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.i32.f16.i32(i32 1, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.i32.nxv4i16.nxv4i16.f16.i32(i32 1, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.i64.f16.i64(i64 1, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.i64.nxv4i16.nxv4i16.f16.i64(i64 1, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_sf_vc_v_fvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, _Float16 fs1, size_t vl) {
@@ -2867,12 +2867,12 @@ vuint16m1_t test_sf_vc_v_fvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, _Float16
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.i32.f16.i32(i32 1, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.i32.nxv8i16.nxv8i16.f16.i32(i32 1, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.i64.f16.i64(i64 1, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.i64.nxv8i16.nxv8i16.f16.i64(i64 1, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_sf_vc_v_fvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, _Float16 fs1, size_t vl) {
@@ -2881,12 +2881,12 @@ vuint16m2_t test_sf_vc_v_fvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, _Float16
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.i32.f16.i32(i32 1, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.i32.nxv16i16.nxv16i16.f16.i32(i32 1, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.i64.f16.i64(i64 1, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.i64.nxv16i16.nxv16i16.f16.i64(i64 1, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_sf_vc_v_fvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, _Float16 fs1, size_t vl) {
@@ -2895,12 +2895,12 @@ vuint16m4_t test_sf_vc_v_fvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, _Float16
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u16m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.i32.f16.i32(i32 1, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.i32.nxv32i16.nxv32i16.f16.i32(i32 1, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.i64.f16.i64(i64 1, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.i64.nxv32i16.nxv32i16.f16.i64(i64 1, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_sf_vc_v_fvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, _Float16 fs1, size_t vl) {
@@ -2909,12 +2909,12 @@ vuint16m8_t test_sf_vc_v_fvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, _Float16
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.i32.f32.i32(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.i32.nxv1i32.nxv1i32.f32.i32(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.i64.f32.i64(i64 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.i64.nxv1i32.nxv1i32.f32.i64(i64 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_sf_vc_v_fvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, float fs1, size_t vl) {
@@ -2923,12 +2923,12 @@ vuint32mf2_t test_sf_vc_v_fvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, float
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.i32.f32.i32(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.i32.nxv2i32.nxv2i32.f32.i32(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.i64.f32.i64(i64 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.i64.nxv2i32.nxv2i32.f32.i64(i64 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_sf_vc_v_fvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, float fs1, size_t vl) {
@@ -2937,12 +2937,12 @@ vuint32m1_t test_sf_vc_v_fvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, float fs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.i32.f32.i32(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.i32.nxv4i32.nxv4i32.f32.i32(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.i64.f32.i64(i64 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.i64.nxv4i32.nxv4i32.f32.i64(i64 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_sf_vc_v_fvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, float fs1, size_t vl) {
@@ -2951,12 +2951,12 @@ vuint32m2_t test_sf_vc_v_fvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, float fs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.i32.f32.i32(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.i32.nxv8i32.nxv8i32.f32.i32(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.i64.f32.i64(i64 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.i64.nxv8i32.nxv8i32.f32.i64(i64 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_sf_vc_v_fvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, float fs1, size_t vl) {
@@ -2965,12 +2965,12 @@ vuint32m4_t test_sf_vc_v_fvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, float fs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u32m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.i32.f32.i32(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.i32.nxv16i32.nxv16i32.f32.i32(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.i64.f32.i64(i64 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.i64.nxv16i32.nxv16i32.f32.i64(i64 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_sf_vc_v_fvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, float fs1, size_t vl) {
@@ -2979,12 +2979,12 @@ vuint32m8_t test_sf_vc_v_fvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, float fs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u64m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.i32.f64.i32(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.i32.nxv1i64.nxv1i64.f64.i32(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.i64.f64.i64(i64 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.i64.nxv1i64.nxv1i64.f64.i64(i64 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_fvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, double fs1, size_t vl) {
@@ -2993,12 +2993,12 @@ vuint64m1_t test_sf_vc_v_fvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, double fs
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u64m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.i32.f64.i32(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.i32.nxv2i64.nxv2i64.f64.i32(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.i64.f64.i64(i64 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.i64.nxv2i64.nxv2i64.f64.i64(i64 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_fvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, double fs1, size_t vl) {
@@ -3007,12 +3007,12 @@ vuint64m2_t test_sf_vc_v_fvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, double fs
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u64m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.i32.f64.i32(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.i32.nxv4i64.nxv4i64.f64.i32(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.i64.f64.i64(i64 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.i64.nxv4i64.nxv4i64.f64.i64(i64 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_fvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, double fs1, size_t vl) {
@@ -3021,12 +3021,12 @@ vuint64m4_t test_sf_vc_v_fvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, double fs
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u64m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.i32.f64.i32(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.i32.nxv8i64.nxv8i64.f64.i32(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.i64.f64.i64(i64 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.i64.nxv8i64.nxv8i64.f64.i64(i64 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_fvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, double fs1, size_t vl) {
@@ -3035,12 +3035,12 @@ vuint64m8_t test_sf_vc_v_fvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, double fs
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.nxv1i16.i32.f16.i32(i32 1, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.nxv1i16.i32.nxv1i16.nxv1i16.f16.i32(i32 1, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.nxv1i16.i64.f16.i64(i64 1, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.nxv1i16.i64.nxv1i16.nxv1i16.f16.i64(i64 1, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_sf_vc_v_fvv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, _Float16 fs1, size_t vl) {
@@ -3049,12 +3049,12 @@ vuint16mf4_t test_sf_vc_v_fvv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, _Float16
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.nxv2i16.i32.f16.i32(i32 1, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.nxv2i16.i32.nxv2i16.nxv2i16.f16.i32(i32 1, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.nxv2i16.i64.f16.i64(i64 1, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.nxv2i16.i64.nxv2i16.nxv2i16.f16.i64(i64 1, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_sf_vc_v_fvv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, _Float16 fs1, size_t vl) {
@@ -3063,12 +3063,12 @@ vuint16mf2_t test_sf_vc_v_fvv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, _Float16
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.nxv4i16.i32.f16.i32(i32 1, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.nxv4i16.i32.nxv4i16.nxv4i16.f16.i32(i32 1, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.nxv4i16.i64.f16.i64(i64 1, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.nxv4i16.i64.nxv4i16.nxv4i16.f16.i64(i64 1, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_sf_vc_v_fvv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, _Float16 fs1, size_t vl) {
@@ -3077,12 +3077,12 @@ vuint16m1_t test_sf_vc_v_fvv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, _Float16 fs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.nxv8i16.i32.f16.i32(i32 1, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.nxv8i16.i32.nxv8i16.nxv8i16.f16.i32(i32 1, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.nxv8i16.i64.f16.i64(i64 1, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.nxv8i16.i64.nxv8i16.nxv8i16.f16.i64(i64 1, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_sf_vc_v_fvv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, _Float16 fs1, size_t vl) {
@@ -3091,12 +3091,12 @@ vuint16m2_t test_sf_vc_v_fvv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, _Float16 fs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.nxv16i16.i32.f16.i32(i32 1, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.nxv16i16.i32.nxv16i16.nxv16i16.f16.i32(i32 1, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.nxv16i16.i64.f16.i64(i64 1, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.nxv16i16.i64.nxv16i16.nxv16i16.f16.i64(i64 1, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_sf_vc_v_fvv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, _Float16 fs1, size_t vl) {
@@ -3105,12 +3105,12 @@ vuint16m4_t test_sf_vc_v_fvv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, _Float16 fs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u16m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.nxv32i16.i32.f16.i32(i32 1, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.nxv32i16.i32.nxv32i16.nxv32i16.f16.i32(i32 1, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.nxv32i16.i64.f16.i64(i64 1, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.nxv32i16.i64.nxv32i16.nxv32i16.f16.i64(i64 1, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_sf_vc_v_fvv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, _Float16 fs1, size_t vl) {
@@ -3119,12 +3119,12 @@ vuint16m8_t test_sf_vc_v_fvv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, _Float16 fs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.nxv1i32.i32.f32.i32(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.nxv1i32.i32.nxv1i32.nxv1i32.f32.i32(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.nxv1i32.i64.f32.i64(i64 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.nxv1i32.i64.nxv1i32.nxv1i32.f32.i64(i64 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_sf_vc_v_fvv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, float fs1, size_t vl) {
@@ -3133,12 +3133,12 @@ vuint32mf2_t test_sf_vc_v_fvv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, float fs
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.nxv2i32.i32.f32.i32(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.nxv2i32.i32.nxv2i32.nxv2i32.f32.i32(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.nxv2i32.i64.f32.i64(i64 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.nxv2i32.i64.nxv2i32.nxv2i32.f32.i64(i64 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_sf_vc_v_fvv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, float fs1, size_t vl) {
@@ -3147,12 +3147,12 @@ vuint32m1_t test_sf_vc_v_fvv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, float fs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.nxv4i32.i32.f32.i32(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.nxv4i32.i32.nxv4i32.nxv4i32.f32.i32(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.nxv4i32.i64.f32.i64(i64 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.nxv4i32.i64.nxv4i32.nxv4i32.f32.i64(i64 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_sf_vc_v_fvv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, float fs1, size_t vl) {
@@ -3161,12 +3161,12 @@ vuint32m2_t test_sf_vc_v_fvv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, float fs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.nxv8i32.i32.f32.i32(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.nxv8i32.i32.nxv8i32.nxv8i32.f32.i32(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.nxv8i32.i64.f32.i64(i64 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.nxv8i32.i64.nxv8i32.nxv8i32.f32.i64(i64 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_sf_vc_v_fvv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, float fs1, size_t vl) {
@@ -3175,12 +3175,12 @@ vuint32m4_t test_sf_vc_v_fvv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, float fs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u32m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.nxv16i32.i32.f32.i32(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.nxv16i32.i32.nxv16i32.nxv16i32.f32.i32(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.nxv16i32.i64.f32.i64(i64 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.nxv16i32.i64.nxv16i32.nxv16i32.f32.i64(i64 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_sf_vc_v_fvv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, float fs1, size_t vl) {
@@ -3189,12 +3189,12 @@ vuint32m8_t test_sf_vc_v_fvv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, float fs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u64m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.nxv1i64.i32.f64.i32(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.nxv1i64.i32.nxv1i64.nxv1i64.f64.i32(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.nxv1i64.i64.f64.i64(i64 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.nxv1i64.i64.nxv1i64.nxv1i64.f64.i64(i64 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_fvv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, double fs1, size_t vl) {
@@ -3203,12 +3203,12 @@ vuint64m1_t test_sf_vc_v_fvv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, double fs1,
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u64m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.nxv2i64.i32.f64.i32(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.nxv2i64.i32.nxv2i64.nxv2i64.f64.i32(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.nxv2i64.i64.f64.i64(i64 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.nxv2i64.i64.nxv2i64.nxv2i64.f64.i64(i64 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_fvv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, double fs1, size_t vl) {
@@ -3217,12 +3217,12 @@ vuint64m2_t test_sf_vc_v_fvv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, double fs1,
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u64m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.nxv4i64.i32.f64.i32(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.nxv4i64.i32.nxv4i64.nxv4i64.f64.i32(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.nxv4i64.i64.f64.i64(i64 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.nxv4i64.i64.nxv4i64.nxv4i64.f64.i64(i64 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_fvv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, double fs1, size_t vl) {
@@ -3231,12 +3231,12 @@ vuint64m4_t test_sf_vc_v_fvv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, double fs1,
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u64m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.nxv8i64.i32.f64.i32(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.nxv8i64.i32.nxv8i64.nxv8i64.f64.i32(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.nxv8i64.i64.f64.i64(i64 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.nxv8i64.i64.nxv8i64.nxv8i64.f64.i64(i64 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_fvv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, double fs1, size_t vl) {
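[For readers unfamiliar with the Xsfvcp builtins these autogenerated checks exercise, a minimal usage sketch in C follows. It is not part of the patch: it assumes clang's <sifive_vector.h> header and a VCIX-enabled target (e.g. -menable-experimental-extensions -march=rv64gcv_zvfh_xsfvcp), and the demo_ function name is hypothetical. It mirrors the test_sf_vc_v_fvv_se_u16m4 case above, whose lowered intrinsic name this patch extends with the extra vector type suffixes.]

    // Minimal sketch (assumption: __riscv_sf_vc_v_fvv_se_u16m4 as exposed by
    // <sifive_vector.h>); mirrors the test_sf_vc_v_fvv_se_u16m4 check above.
    #include <sifive_vector.h>
    #include <stddef.h>

    vuint16m4_t demo_sf_vc_v_fvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2,
                                          _Float16 fs1, size_t vl) {
      // First argument is the p27_26 opcode immediate; 1 matches the
      // constant i32/i64 operand 1 seen in the CHECK lines above.
      return __riscv_sf_vc_v_fvv_se_u16m4(1, vd, vs2, fs1, vl);
    }
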
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvw.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvw.c
index 23ee2b7bb0f4ed6..727043077424b7d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvw.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvw.c
@@ -221,12 +221,12 @@ void test_sf_vc_vvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u8mf8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.i32.nxv1i8.nxv1i8.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.i32.nxv1i16.nxv1i8.nxv1i8.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.i64.nxv1i8.nxv1i8.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.i64.nxv1i16.nxv1i8.nxv1i8.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_sf_vc_v_vvw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
@@ -235,12 +235,12 @@ vuint16mf4_t test_sf_vc_v_vvw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8m
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u8mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.i32.nxv2i8.nxv2i8.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.i32.nxv2i16.nxv2i8.nxv2i8.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.i64.nxv2i8.nxv2i8.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.i64.nxv2i16.nxv2i8.nxv2i8.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_sf_vc_v_vvw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
@@ -249,12 +249,12 @@ vuint16mf2_t test_sf_vc_v_vvw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8m
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u8mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.i32.nxv4i8.nxv4i8.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.i32.nxv4i16.nxv4i8.nxv4i8.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.i64.nxv4i8.nxv4i8.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.i64.nxv4i16.nxv4i8.nxv4i8.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_sf_vc_v_vvw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
@@ -263,12 +263,12 @@ vuint16m1_t test_sf_vc_v_vvw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.i32.nxv8i8.nxv8i8.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.i32.nxv8i16.nxv8i8.nxv8i8.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.i64.nxv8i8.nxv8i8.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.i64.nxv8i16.nxv8i8.nxv8i8.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_sf_vc_v_vvw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
@@ -277,12 +277,12 @@ vuint16m2_t test_sf_vc_v_vvw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u8m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.i32.nxv16i8.nxv16i8.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.i32.nxv16i16.nxv16i8.nxv16i8.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.i64.nxv16i8.nxv16i8.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.i64.nxv16i16.nxv16i8.nxv16i8.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_sf_vc_v_vvw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
@@ -291,12 +291,12 @@ vuint16m4_t test_sf_vc_v_vvw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u8m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.i32.nxv32i8.nxv32i8.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.i32.nxv32i16.nxv32i8.nxv32i8.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.i64.nxv32i8.nxv32i8.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.i64.nxv32i16.nxv32i8.nxv32i8.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_sf_vc_v_vvw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
@@ -305,12 +305,12 @@ vuint16m8_t test_sf_vc_v_vvw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.i32.nxv1i16.nxv1i16.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.i32.nxv1i32.nxv1i16.nxv1i16.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.i64.nxv1i16.nxv1i16.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.i64.nxv1i32.nxv1i16.nxv1i16.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_sf_vc_v_vvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
@@ -319,12 +319,12 @@ vuint32mf2_t test_sf_vc_v_vvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, vuint
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.i32.nxv2i16.nxv2i16.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.i32.nxv2i32.nxv2i16.nxv2i16.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.i64.nxv2i16.nxv2i16.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.i64.nxv2i32.nxv2i16.nxv2i16.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_sf_vc_v_vvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
@@ -333,12 +333,12 @@ vuint32m1_t test_sf_vc_v_vvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.i32.nxv4i16.nxv4i16.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.i32.nxv4i32.nxv4i16.nxv4i16.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.i64.nxv4i16.nxv4i16.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.i64.nxv4i32.nxv4i16.nxv4i16.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_sf_vc_v_vvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
@@ -347,12 +347,12 @@ vuint32m2_t test_sf_vc_v_vvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.i32.nxv8i16.nxv8i16.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.i32.nxv8i32.nxv8i16.nxv8i16.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.i64.nxv8i16.nxv8i16.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.i64.nxv8i32.nxv8i16.nxv8i16.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_sf_vc_v_vvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
@@ -361,12 +361,12 @@ vuint32m4_t test_sf_vc_v_vvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.i32.nxv16i16.nxv16i16.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.i32.nxv16i32.nxv16i16.nxv16i16.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.i64.nxv16i16.nxv16i16.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.i64.nxv16i32.nxv16i16.nxv16i16.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_sf_vc_v_vvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
@@ -375,12 +375,12 @@ vuint32m8_t test_sf_vc_v_vvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.i32.nxv1i32.nxv1i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.i32.nxv1i64.nxv1i32.nxv1i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.i64.nxv1i32.nxv1i32.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.i64.nxv1i64.nxv1i32.nxv1i32.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_vvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
@@ -389,12 +389,12 @@ vuint64m1_t test_sf_vc_v_vvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, vuint32
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.i32.nxv2i32.nxv2i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.i32.nxv2i64.nxv2i32.nxv2i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.i64.nxv2i32.nxv2i32.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.i64.nxv2i64.nxv2i32.nxv2i32.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_vvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
@@ -403,12 +403,12 @@ vuint64m2_t test_sf_vc_v_vvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.i32.nxv4i32.nxv4i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.i32.nxv4i64.nxv4i32.nxv4i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.i64.nxv4i32.nxv4i32.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.i64.nxv4i64.nxv4i32.nxv4i32.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_vvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
@@ -417,12 +417,12 @@ vuint64m4_t test_sf_vc_v_vvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.i32.nxv8i32.nxv8i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.i32.nxv8i64.nxv8i32.nxv8i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.i64.nxv8i32.nxv8i32.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.i64.nxv8i64.nxv8i32.nxv8i32.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_vvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
@@ -431,12 +431,12 @@ vuint64m8_t test_sf_vc_v_vvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u8mf8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.nxv1i16.i32.nxv1i8.nxv1i8.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.nxv1i16.i32.nxv1i16.nxv1i8.nxv1i8.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.nxv1i16.i64.nxv1i8.nxv1i8.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.nxv1i16.i64.nxv1i16.nxv1i8.nxv1i8.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_sf_vc_v_vvw_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
@@ -445,12 +445,12 @@ vuint16mf4_t test_sf_vc_v_vvw_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u8mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.nxv2i16.i32.nxv2i8.nxv2i8.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.nxv2i16.i32.nxv2i16.nxv2i8.nxv2i8.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.nxv2i16.i64.nxv2i8.nxv2i8.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.nxv2i16.i64.nxv2i16.nxv2i8.nxv2i8.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_sf_vc_v_vvw_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
@@ -459,12 +459,12 @@ vuint16mf2_t test_sf_vc_v_vvw_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u8mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.nxv4i16.i32.nxv4i8.nxv4i8.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.nxv4i16.i32.nxv4i16.nxv4i8.nxv4i8.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.nxv4i16.i64.nxv4i8.nxv4i8.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.nxv4i16.i64.nxv4i16.nxv4i8.nxv4i8.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_sf_vc_v_vvw_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
@@ -473,12 +473,12 @@ vuint16m1_t test_sf_vc_v_vvw_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.nxv8i16.i32.nxv8i8.nxv8i8.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.nxv8i16.i32.nxv8i16.nxv8i8.nxv8i8.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.nxv8i16.i64.nxv8i8.nxv8i8.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.nxv8i16.i64.nxv8i16.nxv8i8.nxv8i8.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_sf_vc_v_vvw_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
@@ -487,12 +487,12 @@ vuint16m2_t test_sf_vc_v_vvw_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u8m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.nxv16i16.i32.nxv16i8.nxv16i8.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.nxv16i16.i32.nxv16i16.nxv16i8.nxv16i8.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.nxv16i16.i64.nxv16i8.nxv16i8.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.nxv16i16.i64.nxv16i16.nxv16i8.nxv16i8.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_sf_vc_v_vvw_u8m2(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
@@ -501,12 +501,12 @@ vuint16m4_t test_sf_vc_v_vvw_u8m2(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u8m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.nxv32i16.i32.nxv32i8.nxv32i8.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.nxv32i16.i32.nxv32i16.nxv32i8.nxv32i8.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.nxv32i16.i64.nxv32i8.nxv32i8.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.nxv32i16.i64.nxv32i16.nxv32i8.nxv32i8.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_sf_vc_v_vvw_u8m4(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
@@ -515,12 +515,12 @@ vuint16m8_t test_sf_vc_v_vvw_u8m4(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.nxv1i32.i32.nxv1i16.nxv1i16.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.nxv1i32.i32.nxv1i32.nxv1i16.nxv1i16.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.nxv1i32.i64.nxv1i16.nxv1i16.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.nxv1i32.i64.nxv1i32.nxv1i16.nxv1i16.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_sf_vc_v_vvw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
@@ -529,12 +529,12 @@ vuint32mf2_t test_sf_vc_v_vvw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16m
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.nxv2i32.i32.nxv2i16.nxv2i16.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.nxv2i32.i32.nxv2i32.nxv2i16.nxv2i16.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.nxv2i32.i64.nxv2i16.nxv2i16.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.nxv2i32.i64.nxv2i32.nxv2i16.nxv2i16.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_sf_vc_v_vvw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
@@ -543,12 +543,12 @@ vuint32m1_t test_sf_vc_v_vvw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.nxv4i32.i32.nxv4i16.nxv4i16.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.nxv4i32.i32.nxv4i32.nxv4i16.nxv4i16.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.nxv4i32.i64.nxv4i16.nxv4i16.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.nxv4i32.i64.nxv4i32.nxv4i16.nxv4i16.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_sf_vc_v_vvw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
@@ -557,12 +557,12 @@ vuint32m2_t test_sf_vc_v_vvw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.nxv8i32.i32.nxv8i16.nxv8i16.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.nxv8i32.i32.nxv8i32.nxv8i16.nxv8i16.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.nxv8i32.i64.nxv8i16.nxv8i16.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.nxv8i32.i64.nxv8i32.nxv8i16.nxv8i16.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_sf_vc_v_vvw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
@@ -571,12 +571,12 @@ vuint32m4_t test_sf_vc_v_vvw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.nxv16i32.i32.nxv16i16.nxv16i16.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.nxv16i32.i32.nxv16i32.nxv16i16.nxv16i16.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.nxv16i32.i64.nxv16i16.nxv16i16.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.nxv16i32.i64.nxv16i32.nxv16i16.nxv16i16.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_sf_vc_v_vvw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
@@ -585,12 +585,12 @@ vuint32m8_t test_sf_vc_v_vvw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.nxv1i64.i32.nxv1i32.nxv1i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.nxv1i64.i32.nxv1i64.nxv1i32.nxv1i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.nxv1i64.i64.nxv1i32.nxv1i32.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.nxv1i64.i64.nxv1i64.nxv1i32.nxv1i32.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_vvw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
@@ -599,12 +599,12 @@ vuint64m1_t test_sf_vc_v_vvw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.nxv2i64.i32.nxv2i32.nxv2i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.nxv2i64.i32.nxv2i64.nxv2i32.nxv2i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.nxv2i64.i64.nxv2i32.nxv2i32.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.nxv2i64.i64.nxv2i64.nxv2i32.nxv2i32.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_vvw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
@@ -613,12 +613,12 @@ vuint64m2_t test_sf_vc_v_vvw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.nxv4i64.i32.nxv4i32.nxv4i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.nxv4i64.i32.nxv4i64.nxv4i32.nxv4i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.nxv4i64.i64.nxv4i32.nxv4i32.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.nxv4i64.i64.nxv4i64.nxv4i32.nxv4i32.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_vvw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
@@ -627,12 +627,12 @@ vuint64m4_t test_sf_vc_v_vvw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.nxv8i64.i32.nxv8i32.nxv8i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.nxv8i64.i32.nxv8i64.nxv8i32.nxv8i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.nxv8i64.i64.nxv8i32.nxv8i32.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.nxv8i64.i64.nxv8i64.nxv8i32.nxv8i32.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_vvw_u32m4(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
@@ -851,12 +851,12 @@ void test_sf_vc_xvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u8mf8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.i32.nxv1i8.i8.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.i32.nxv1i16.nxv1i8.i8.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.i64.nxv1i8.i8.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.i64.nxv1i16.nxv1i8.i8.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_sf_vc_v_xvw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
@@ -865,12 +865,12 @@ vuint16mf4_t test_sf_vc_v_xvw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u8mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.i32.nxv2i8.i8.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.i32.nxv2i16.nxv2i8.i8.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.i64.nxv2i8.i8.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.i64.nxv2i16.nxv2i8.i8.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_sf_vc_v_xvw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
@@ -879,12 +879,12 @@ vuint16mf2_t test_sf_vc_v_xvw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u8mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.i32.nxv4i8.i8.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.i32.nxv4i16.nxv4i8.i8.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.i64.nxv4i8.i8.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.i64.nxv4i16.nxv4i8.i8.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_sf_vc_v_xvw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
@@ -893,12 +893,12 @@ vuint16m1_t test_sf_vc_v_xvw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t r
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.i32.nxv8i8.i8.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.i32.nxv8i16.nxv8i8.i8.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.i64.nxv8i8.i8.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.i64.nxv8i16.nxv8i8.i8.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_sf_vc_v_xvw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
@@ -907,12 +907,12 @@ vuint16m2_t test_sf_vc_v_xvw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u8m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.i32.nxv16i8.i8.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.i32.nxv16i16.nxv16i8.i8.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.i64.nxv16i8.i8.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.i64.nxv16i16.nxv16i8.i8.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_sf_vc_v_xvw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
@@ -921,12 +921,12 @@ vuint16m4_t test_sf_vc_v_xvw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u8m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.i32.nxv32i8.i8.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.i32.nxv32i16.nxv32i8.i8.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.i64.nxv32i8.i8.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.i64.nxv32i16.nxv32i8.i8.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_sf_vc_v_xvw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
@@ -935,12 +935,12 @@ vuint16m8_t test_sf_vc_v_xvw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.i32.nxv1i16.i16.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.i32.nxv1i32.nxv1i16.i16.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.i64.nxv1i16.i16.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.i64.nxv1i32.nxv1i16.i16.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_sf_vc_v_xvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
@@ -949,12 +949,12 @@ vuint32mf2_t test_sf_vc_v_xvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, uint1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.i32.nxv2i16.i16.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.i32.nxv2i32.nxv2i16.i16.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.i64.nxv2i16.i16.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.i64.nxv2i32.nxv2i16.i16.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_sf_vc_v_xvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
@@ -963,12 +963,12 @@ vuint32m1_t test_sf_vc_v_xvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, uint16_
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.i32.nxv4i16.i16.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.i32.nxv4i32.nxv4i16.i16.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.i64.nxv4i16.i16.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.i64.nxv4i32.nxv4i16.i16.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_sf_vc_v_xvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
@@ -977,12 +977,12 @@ vuint32m2_t test_sf_vc_v_xvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, uint16_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.i32.nxv8i16.i16.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.i32.nxv8i32.nxv8i16.i16.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.i64.nxv8i16.i16.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.i64.nxv8i32.nxv8i16.i16.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_sf_vc_v_xvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
@@ -991,12 +991,12 @@ vuint32m4_t test_sf_vc_v_xvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, uint16_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.i32.nxv16i16.i16.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.i32.nxv16i32.nxv16i16.i16.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.i64.nxv16i16.i16.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.i64.nxv16i32.nxv16i16.i16.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_sf_vc_v_xvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
@@ -1005,12 +1005,12 @@ vuint32m8_t test_sf_vc_v_xvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, uint16_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.i32.nxv1i32.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.i32.nxv1i64.nxv1i32.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.i64.nxv1i32.i32.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.i64.nxv1i64.nxv1i32.i32.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_xvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
@@ -1019,12 +1019,12 @@ vuint64m1_t test_sf_vc_v_xvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, uint32_
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.i32.nxv2i32.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.i32.nxv2i64.nxv2i32.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.i64.nxv2i32.i32.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.i64.nxv2i64.nxv2i32.i32.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_xvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
@@ -1033,12 +1033,12 @@ vuint64m2_t test_sf_vc_v_xvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, uint32_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.i32.nxv4i32.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.i32.nxv4i64.nxv4i32.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.i64.nxv4i32.i32.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.i64.nxv4i64.nxv4i32.i32.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_xvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
@@ -1047,12 +1047,12 @@ vuint64m4_t test_sf_vc_v_xvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, uint32_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.i32.nxv8i32.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.i32.nxv8i64.nxv8i32.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.i64.nxv8i32.i32.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.i64.nxv8i64.nxv8i32.i32.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_xvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
@@ -1061,12 +1061,12 @@ vuint64m8_t test_sf_vc_v_xvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, uint32_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u8mf8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.nxv1i16.i32.nxv1i8.i8.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.nxv1i16.i32.nxv1i16.nxv1i8.i8.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.nxv1i16.i64.nxv1i8.i8.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.nxv1i16.i64.nxv1i16.nxv1i8.i8.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_sf_vc_v_xvw_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
@@ -1075,12 +1075,12 @@ vuint16mf4_t test_sf_vc_v_xvw_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u8mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.nxv2i16.i32.nxv2i8.i8.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.nxv2i16.i32.nxv2i16.nxv2i8.i8.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.nxv2i16.i64.nxv2i8.i8.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.nxv2i16.i64.nxv2i16.nxv2i8.i8.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_sf_vc_v_xvw_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
@@ -1089,12 +1089,12 @@ vuint16mf2_t test_sf_vc_v_xvw_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u8mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.nxv4i16.i32.nxv4i8.i8.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.nxv4i16.i32.nxv4i16.nxv4i8.i8.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.nxv4i16.i64.nxv4i8.i8.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.nxv4i16.i64.nxv4i16.nxv4i8.i8.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_sf_vc_v_xvw_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
@@ -1103,12 +1103,12 @@ vuint16m1_t test_sf_vc_v_xvw_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1,
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.nxv8i16.i32.nxv8i8.i8.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.nxv8i16.i32.nxv8i16.nxv8i8.i8.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.nxv8i16.i64.nxv8i8.i8.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.nxv8i16.i64.nxv8i16.nxv8i8.i8.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_sf_vc_v_xvw_u8m1(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
@@ -1117,12 +1117,12 @@ vuint16m2_t test_sf_vc_v_xvw_u8m1(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u8m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.nxv16i16.i32.nxv16i8.i8.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.nxv16i16.i32.nxv16i16.nxv16i8.i8.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.nxv16i16.i64.nxv16i8.i8.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.nxv16i16.i64.nxv16i16.nxv16i8.i8.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_sf_vc_v_xvw_u8m2(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
@@ -1131,12 +1131,12 @@ vuint16m4_t test_sf_vc_v_xvw_u8m2(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u8m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.nxv32i16.i32.nxv32i8.i8.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.nxv32i16.i32.nxv32i16.nxv32i8.i8.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.nxv32i16.i64.nxv32i8.i8.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.nxv32i16.i64.nxv32i16.nxv32i8.i8.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_sf_vc_v_xvw_u8m4(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
@@ -1145,12 +1145,12 @@ vuint16m8_t test_sf_vc_v_xvw_u8m4(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.nxv1i32.i32.nxv1i16.i16.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.nxv1i32.i32.nxv1i32.nxv1i16.i16.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.nxv1i32.i64.nxv1i16.i16.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.nxv1i32.i64.nxv1i32.nxv1i16.i16.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_sf_vc_v_xvw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
@@ -1159,12 +1159,12 @@ vuint32mf2_t test_sf_vc_v_xvw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.nxv2i32.i32.nxv2i16.i16.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.nxv2i32.i32.nxv2i32.nxv2i16.i16.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.nxv2i32.i64.nxv2i16.i16.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.nxv2i32.i64.nxv2i32.nxv2i16.i16.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_sf_vc_v_xvw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
@@ -1173,12 +1173,12 @@ vuint32m1_t test_sf_vc_v_xvw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, uint16_t r
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.nxv4i32.i32.nxv4i16.i16.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.nxv4i32.i32.nxv4i32.nxv4i16.i16.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.nxv4i32.i64.nxv4i16.i16.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.nxv4i32.i64.nxv4i32.nxv4i16.i16.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_sf_vc_v_xvw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
@@ -1187,12 +1187,12 @@ vuint32m2_t test_sf_vc_v_xvw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.nxv8i32.i32.nxv8i16.i16.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.nxv8i32.i32.nxv8i32.nxv8i16.i16.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.nxv8i32.i64.nxv8i16.i16.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.nxv8i32.i64.nxv8i32.nxv8i16.i16.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_sf_vc_v_xvw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
@@ -1201,12 +1201,12 @@ vuint32m4_t test_sf_vc_v_xvw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.nxv16i32.i32.nxv16i16.i16.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.nxv16i32.i32.nxv16i32.nxv16i16.i16.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.nxv16i32.i64.nxv16i16.i16.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.nxv16i32.i64.nxv16i32.nxv16i16.i16.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_sf_vc_v_xvw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
@@ -1215,12 +1215,12 @@ vuint32m8_t test_sf_vc_v_xvw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.nxv1i64.i32.nxv1i32.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.nxv1i64.i32.nxv1i64.nxv1i32.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.nxv1i64.i64.nxv1i32.i32.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.nxv1i64.i64.nxv1i64.nxv1i32.i32.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_xvw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
@@ -1229,12 +1229,12 @@ vuint64m1_t test_sf_vc_v_xvw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, uint32_t r
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.nxv2i64.i32.nxv2i32.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.nxv2i64.i32.nxv2i64.nxv2i32.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.nxv2i64.i64.nxv2i32.i32.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.nxv2i64.i64.nxv2i64.nxv2i32.i32.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_xvw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
@@ -1243,12 +1243,12 @@ vuint64m2_t test_sf_vc_v_xvw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.nxv4i64.i32.nxv4i32.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.nxv4i64.i32.nxv4i64.nxv4i32.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.nxv4i64.i64.nxv4i32.i32.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.nxv4i64.i64.nxv4i64.nxv4i32.i32.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_xvw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
@@ -1257,12 +1257,12 @@ vuint64m4_t test_sf_vc_v_xvw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.nxv8i64.i32.nxv8i32.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.nxv8i64.i32.nxv8i64.nxv8i32.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.nxv8i64.i64.nxv8i32.i32.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.nxv8i64.i64.nxv8i64.nxv8i32.i32.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_xvw_u32m4(vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
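
The hunks above all apply one mechanical change: the overloaded suffix of each
@llvm.riscv.sf.vc.v.xvw* intrinsic now spells out the destination vector type
(vd) in addition to the source operand types, so the mangled name reads
<result>.<XLEN>.<vd>.<vs2>.<rs1>.<vl> rather than <result>.<XLEN>.<vs2>.<rs1>.<vl>.
A minimal C sketch of what these tests exercise, assuming a toolchain with
sifive_vector.h and the XSfvcp extension enabled (the builtin spelling is
inferred from the test names above, so treat it as an assumption):

  // Sketch only: assumes <sifive_vector.h> and -march=rv64gcv_xsfvcp.
  #include <sifive_vector.h>

  vuint64m8_t widen_demo(vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1,
                         size_t vl) {
    // On RV64 this should lower to:
    //   call <vscale x 8 x i64>
    //       @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.i64.nxv8i64.nxv8i32.i32.i64(
    //           i64 3, vd, vs2, i32 rs1, i64 vl)
    // i.e. result, XLEN opcode type, vd, vs2, rs1, vl -- with the vd type
    // (nxv8i64) newly present in the mangled name.
    return __riscv_sf_vc_v_xvw_se_u32m4(3, vd, vs2, rs1, vl);
  }
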
@@ -1481,12 +1481,12 @@ void test_sf_vc_ivw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u8mf8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.i32.nxv1i8.i32.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.i32.nxv1i16.nxv1i8.i32.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.i64.nxv1i8.i64.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.i64.nxv1i16.nxv1i8.i64.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_sf_vc_v_ivw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) {
@@ -1495,12 +1495,12 @@ vuint16mf4_t test_sf_vc_v_ivw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, size_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u8mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.i32.nxv2i8.i32.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.i32.nxv2i16.nxv2i8.i32.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.i64.nxv2i8.i64.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.i64.nxv2i16.nxv2i8.i64.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_sf_vc_v_ivw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) {
@@ -1509,12 +1509,12 @@ vuint16mf2_t test_sf_vc_v_ivw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, size_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u8mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv4i16.i32.nxv4i8.i32.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv4i16.i32.nxv4i16.nxv4i8.i32.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv4i16.i64.nxv4i8.i64.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv4i16.i64.nxv4i16.nxv4i8.i64.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_sf_vc_v_ivw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) {
@@ -1523,12 +1523,12 @@ vuint16m1_t test_sf_vc_v_ivw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.i32.nxv8i8.i32.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.i32.nxv8i16.nxv8i8.i32.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.i64.nxv8i8.i64.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.i64.nxv8i16.nxv8i8.i64.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_sf_vc_v_ivw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, size_t vl) {
@@ -1537,12 +1537,12 @@ vuint16m2_t test_sf_vc_v_ivw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, size_t vl)
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u8m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.i32.nxv16i8.i32.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.i32.nxv16i16.nxv16i8.i32.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.i64.nxv16i8.i64.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.i64.nxv16i16.nxv16i8.i64.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_sf_vc_v_ivw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, size_t vl) {
@@ -1551,12 +1551,12 @@ vuint16m4_t test_sf_vc_v_ivw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, size_t vl)
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u8m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.i32.nxv32i8.i32.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.i32.nxv32i16.nxv32i8.i32.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.i64.nxv32i8.i64.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.i64.nxv32i16.nxv32i8.i64.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_sf_vc_v_ivw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, size_t vl) {
@@ -1565,12 +1565,12 @@ vuint16m8_t test_sf_vc_v_ivw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, size_t vl)
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.i32.nxv1i16.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.i32.nxv1i32.nxv1i16.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.i64.nxv1i16.i64.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.i64.nxv1i32.nxv1i16.i64.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_sf_vc_v_ivw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) {
@@ -1579,12 +1579,12 @@ vuint32mf2_t test_sf_vc_v_ivw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, size_
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.i32.nxv2i16.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.i32.nxv2i32.nxv2i16.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.i64.nxv2i16.i64.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.i64.nxv2i32.nxv2i16.i64.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_sf_vc_v_ivw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) {
@@ -1593,12 +1593,12 @@ vuint32m1_t test_sf_vc_v_ivw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, size_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.i32.nxv4i16.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.i32.nxv4i32.nxv4i16.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.i64.nxv4i16.i64.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.i64.nxv4i32.nxv4i16.i64.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_sf_vc_v_ivw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, size_t vl) {
@@ -1607,12 +1607,12 @@ vuint32m2_t test_sf_vc_v_ivw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.i32.nxv8i16.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.i32.nxv8i32.nxv8i16.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.i64.nxv8i16.i64.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.i64.nxv8i32.nxv8i16.i64.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_sf_vc_v_ivw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, size_t vl) {
@@ -1621,12 +1621,12 @@ vuint32m4_t test_sf_vc_v_ivw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.i32.nxv16i16.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.i32.nxv16i32.nxv16i16.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.i64.nxv16i16.i64.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.i64.nxv16i32.nxv16i16.i64.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_sf_vc_v_ivw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, size_t vl) {
@@ -1635,12 +1635,12 @@ vuint32m8_t test_sf_vc_v_ivw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.i32.nxv1i32.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.i32.nxv1i64.nxv1i32.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.i64.nxv1i32.i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.i64.nxv1i64.nxv1i32.i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_ivw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) {
@@ -1649,12 +1649,12 @@ vuint64m1_t test_sf_vc_v_ivw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, size_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.i32.nxv2i32.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.i32.nxv2i64.nxv2i32.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.i64.nxv2i32.i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.i64.nxv2i64.nxv2i32.i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_ivw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, size_t vl) {
@@ -1663,12 +1663,12 @@ vuint64m2_t test_sf_vc_v_ivw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.i32.nxv4i32.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.i32.nxv4i64.nxv4i32.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.i64.nxv4i32.i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.i64.nxv4i64.nxv4i32.i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_ivw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, size_t vl) {
@@ -1677,12 +1677,12 @@ vuint64m4_t test_sf_vc_v_ivw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.i32.nxv8i32.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.i32.nxv8i64.nxv8i32.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.i64.nxv8i32.i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.i64.nxv8i64.nxv8i32.i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_ivw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, size_t vl) {
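
The sf_vc_v_ivw hunks follow the same scheme; the only difference from the xvw
form is that the third source is a 5-bit immediate rather than a GPR, which the
checks materialize as an XLEN-wide constant (i32 10 / i64 10). A matching
sketch under the same assumptions:

  // Sketch only: same toolchain assumptions as the previous example.
  #include <sifive_vector.h>

  vuint16mf4_t widen_imm_demo(vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) {
    // On RV64 this should lower to:
    //   call <vscale x 1 x i16>
    //       @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.i64.nxv1i16.nxv1i8.i64.i64(
    //           i64 3, vd, vs2, i64 10, i64 vl)
    return __riscv_sf_vc_v_ivw_se_u8mf8(3, vd, vs2, 10, vl);
  }
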
@@ -1691,12 +1691,12 @@ vuint64m8_t test_sf_vc_v_ivw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u8mf8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.nxv1i16.i32.nxv1i8.i32.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.nxv1i16.i32.nxv1i16.nxv1i8.i32.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.nxv1i16.i64.nxv1i8.i64.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.nxv1i16.i64.nxv1i16.nxv1i8.i64.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_sf_vc_v_ivw_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) {
@@ -1705,12 +1705,12 @@ vuint16mf4_t test_sf_vc_v_ivw_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl)
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u8mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.nxv2i16.i32.nxv2i8.i32.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.nxv2i16.i32.nxv2i16.nxv2i8.i32.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.nxv2i16.i64.nxv2i8.i64.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.nxv2i16.i64.nxv2i16.nxv2i8.i64.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_sf_vc_v_ivw_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) {
@@ -1719,12 +1719,12 @@ vuint16mf2_t test_sf_vc_v_ivw_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl)
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u8mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.nxv4i16.i32.nxv4i8.i32.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.nxv4i16.i32.nxv4i16.nxv4i8.i32.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.nxv4i16.i64.nxv4i8.i64.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.nxv4i16.i64.nxv4i16.nxv4i8.i64.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_sf_vc_v_ivw_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) {
@@ -1733,12 +1733,12 @@ vuint16m1_t test_sf_vc_v_ivw_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.nxv8i16.i32.nxv8i8.i32.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.nxv8i16.i32.nxv8i16.nxv8i8.i32.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.nxv8i16.i64.nxv8i8.i64.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.nxv8i16.i64.nxv8i16.nxv8i8.i64.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_sf_vc_v_ivw_u8m1(vuint16m2_t vd, vuint8m1_t vs2, size_t vl) {
@@ -1747,12 +1747,12 @@ vuint16m2_t test_sf_vc_v_ivw_u8m1(vuint16m2_t vd, vuint8m1_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u8m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.nxv16i16.i32.nxv16i8.i32.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.nxv16i16.i32.nxv16i16.nxv16i8.i32.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.nxv16i16.i64.nxv16i8.i64.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.nxv16i16.i64.nxv16i16.nxv16i8.i64.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_sf_vc_v_ivw_u8m2(vuint16m4_t vd, vuint8m2_t vs2, size_t vl) {
@@ -1761,12 +1761,12 @@ vuint16m4_t test_sf_vc_v_ivw_u8m2(vuint16m4_t vd, vuint8m2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u8m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.nxv32i16.i32.nxv32i8.i32.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.nxv32i16.i32.nxv32i16.nxv32i8.i32.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.nxv32i16.i64.nxv32i8.i64.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.nxv32i16.i64.nxv32i16.nxv32i8.i64.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_sf_vc_v_ivw_u8m4(vuint16m8_t vd, vuint8m4_t vs2, size_t vl) {
@@ -1775,12 +1775,12 @@ vuint16m8_t test_sf_vc_v_ivw_u8m4(vuint16m8_t vd, vuint8m4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.nxv1i32.i32.nxv1i16.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.nxv1i32.i32.nxv1i32.nxv1i16.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.nxv1i32.i64.nxv1i16.i64.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.nxv1i32.i64.nxv1i32.nxv1i16.i64.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_sf_vc_v_ivw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) {
@@ -1789,12 +1789,12 @@ vuint32mf2_t test_sf_vc_v_ivw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, size_t v
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.nxv2i32.i32.nxv2i16.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.nxv2i32.i32.nxv2i32.nxv2i16.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.nxv2i32.i64.nxv2i16.i64.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.nxv2i32.i64.nxv2i32.nxv2i16.i64.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_sf_vc_v_ivw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) {
@@ -1803,12 +1803,12 @@ vuint32m1_t test_sf_vc_v_ivw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, size_t vl)
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.nxv4i32.i32.nxv4i16.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.nxv4i32.i32.nxv4i32.nxv4i16.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.nxv4i32.i64.nxv4i16.i64.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.nxv4i32.i64.nxv4i32.nxv4i16.i64.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_sf_vc_v_ivw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, size_t vl) {
@@ -1817,12 +1817,12 @@ vuint32m2_t test_sf_vc_v_ivw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.nxv8i32.i32.nxv8i16.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.nxv8i32.i32.nxv8i32.nxv8i16.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.nxv8i32.i64.nxv8i16.i64.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.nxv8i32.i64.nxv8i32.nxv8i16.i64.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_sf_vc_v_ivw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, size_t vl) {
@@ -1831,12 +1831,12 @@ vuint32m4_t test_sf_vc_v_ivw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.nxv16i32.i32.nxv16i16.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.nxv16i32.i32.nxv16i32.nxv16i16.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.nxv16i32.i64.nxv16i16.i64.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.nxv16i32.i64.nxv16i32.nxv16i16.i64.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_sf_vc_v_ivw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, size_t vl) {
@@ -1845,12 +1845,12 @@ vuint32m8_t test_sf_vc_v_ivw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.nxv1i64.i32.nxv1i32.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.nxv1i64.i32.nxv1i64.nxv1i32.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.nxv1i64.i64.nxv1i32.i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.nxv1i64.i64.nxv1i64.nxv1i32.i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_ivw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) {
@@ -1859,12 +1859,12 @@ vuint64m1_t test_sf_vc_v_ivw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, size_t vl)
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.nxv2i64.i32.nxv2i32.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.nxv2i64.i32.nxv2i64.nxv2i32.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.nxv2i64.i64.nxv2i32.i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.nxv2i64.i64.nxv2i64.nxv2i32.i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_ivw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, size_t vl) {
@@ -1873,12 +1873,12 @@ vuint64m2_t test_sf_vc_v_ivw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.nxv4i64.i32.nxv4i32.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.nxv4i64.i32.nxv4i64.nxv4i32.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.nxv4i64.i64.nxv4i32.i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.nxv4i64.i64.nxv4i64.nxv4i32.i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_ivw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, size_t vl) {
@@ -1887,12 +1887,12 @@ vuint64m4_t test_sf_vc_v_ivw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.nxv8i64.i32.nxv8i32.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.nxv8i64.i32.nxv8i64.nxv8i32.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.nxv8i64.i64.nxv8i32.i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.nxv8i64.i64.nxv8i64.nxv8i32.i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_ivw_u32m4(vuint64m8_t vd, vuint32m4_t vs2, size_t vl) {
@@ -2027,12 +2027,12 @@ void test_sf_vc_fvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, float fs1, size_t
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.i32.nxv1i16.f16.i32(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.i32.nxv1i32.nxv1i16.f16.i32(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.i64.nxv1i16.f16.i64(i64 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.i64.nxv1i32.nxv1i16.f16.i64(i64 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_sf_vc_v_fvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, _Float16 fs1, size_t vl) {
@@ -2041,12 +2041,12 @@ vuint32mf2_t test_sf_vc_v_fvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, _Floa
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.i32.nxv2i16.f16.i32(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.i32.nxv2i32.nxv2i16.f16.i32(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.i64.nxv2i16.f16.i64(i64 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.i64.nxv2i32.nxv2i16.f16.i64(i64 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_sf_vc_v_fvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, _Float16 fs1, size_t vl) {
@@ -2055,12 +2055,12 @@ vuint32m1_t test_sf_vc_v_fvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, _Float1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.i32.nxv4i16.f16.i32(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.i32.nxv4i32.nxv4i16.f16.i32(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.i64.nxv4i16.f16.i64(i64 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.i64.nxv4i32.nxv4i16.f16.i64(i64 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_sf_vc_v_fvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, _Float16 fs1, size_t vl) {
@@ -2069,12 +2069,12 @@ vuint32m2_t test_sf_vc_v_fvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, _Float16
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.i32.nxv8i16.f16.i32(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.i32.nxv8i32.nxv8i16.f16.i32(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.i64.nxv8i16.f16.i64(i64 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.i64.nxv8i32.nxv8i16.f16.i64(i64 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_sf_vc_v_fvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, _Float16 fs1, size_t vl) {
@@ -2083,12 +2083,12 @@ vuint32m4_t test_sf_vc_v_fvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, _Float16
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.i32.nxv16i16.f16.i32(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.i32.nxv16i32.nxv16i16.f16.i32(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.i64.nxv16i16.f16.i64(i64 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.i64.nxv16i32.nxv16i16.f16.i64(i64 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_sf_vc_v_fvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, _Float16 fs1, size_t vl) {
@@ -2097,12 +2097,12 @@ vuint32m8_t test_sf_vc_v_fvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, _Float16
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.i32.nxv1i32.f32.i32(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.i32.nxv1i64.nxv1i32.f32.i32(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.i64.nxv1i32.f32.i64(i64 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.i64.nxv1i64.nxv1i32.f32.i64(i64 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_fvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, float fs1, size_t vl) {
@@ -2111,12 +2111,12 @@ vuint64m1_t test_sf_vc_v_fvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, float f
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv2i64.i32.nxv2i32.f32.i32(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv2i64.i32.nxv2i64.nxv2i32.f32.i32(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv2i64.i64.nxv2i32.f32.i64(i64 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv2i64.i64.nxv2i64.nxv2i32.f32.i64(i64 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_fvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, float fs1, size_t vl) {
@@ -2125,12 +2125,12 @@ vuint64m2_t test_sf_vc_v_fvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, float fs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.i32.nxv4i32.f32.i32(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.i32.nxv4i64.nxv4i32.f32.i32(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.i64.nxv4i32.f32.i64(i64 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.i64.nxv4i64.nxv4i32.f32.i64(i64 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_fvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, float fs1, size_t vl) {
@@ -2139,12 +2139,12 @@ vuint64m4_t test_sf_vc_v_fvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, float fs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.i32.nxv8i32.f32.i32(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.i32.nxv8i64.nxv8i32.f32.i32(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.i64.nxv8i32.f32.i64(i64 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.i64.nxv8i64.nxv8i32.f32.i64(i64 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_fvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, float fs1, size_t vl) {
@@ -2153,12 +2153,12 @@ vuint64m8_t test_sf_vc_v_fvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, float fs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u16mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.nxv1i32.i32.nxv1i16.f16.i32(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.nxv1i32.i32.nxv1i32.nxv1i16.f16.i32(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.nxv1i32.i64.nxv1i16.f16.i64(i64 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.nxv1i32.i64.nxv1i32.nxv1i16.f16.i64(i64 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_sf_vc_v_fvw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, _Float16 fs1, size_t vl) {
@@ -2167,12 +2167,12 @@ vuint32mf2_t test_sf_vc_v_fvw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, _Float16
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u16mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.nxv2i32.i32.nxv2i16.f16.i32(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.nxv2i32.i32.nxv2i32.nxv2i16.f16.i32(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.nxv2i32.i64.nxv2i16.f16.i64(i64 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.nxv2i32.i64.nxv2i32.nxv2i16.f16.i64(i64 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_sf_vc_v_fvw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, _Float16 fs1, size_t vl) {
@@ -2181,12 +2181,12 @@ vuint32m1_t test_sf_vc_v_fvw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, _Float16 f
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.nxv4i32.i32.nxv4i16.f16.i32(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.nxv4i32.i32.nxv4i32.nxv4i16.f16.i32(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.nxv4i32.i64.nxv4i16.f16.i64(i64 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.nxv4i32.i64.nxv4i32.nxv4i16.f16.i64(i64 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_sf_vc_v_fvw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, _Float16 fs1, size_t vl) {
@@ -2195,12 +2195,12 @@ vuint32m2_t test_sf_vc_v_fvw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, _Float16 fs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u16m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.nxv8i32.i32.nxv8i16.f16.i32(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.nxv8i32.i32.nxv8i32.nxv8i16.f16.i32(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.nxv8i32.i64.nxv8i16.f16.i64(i64 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.nxv8i32.i64.nxv8i32.nxv8i16.f16.i64(i64 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_sf_vc_v_fvw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, _Float16 fs1, size_t vl) {
@@ -2209,12 +2209,12 @@ vuint32m4_t test_sf_vc_v_fvw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, _Float16 fs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u16m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.nxv16i32.i32.nxv16i16.f16.i32(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.nxv16i32.i32.nxv16i32.nxv16i16.f16.i32(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.nxv16i32.i64.nxv16i16.f16.i64(i64 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.nxv16i32.i64.nxv16i32.nxv16i16.f16.i64(i64 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_sf_vc_v_fvw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, _Float16 fs1, size_t vl) {
@@ -2223,12 +2223,12 @@ vuint32m8_t test_sf_vc_v_fvw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, _Float16 fs1
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.nxv1i64.i32.nxv1i32.f32.i32(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.nxv1i64.i32.nxv1i64.nxv1i32.f32.i32(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.nxv1i64.i64.nxv1i32.f32.i64(i64 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.nxv1i64.i64.nxv1i64.nxv1i32.f32.i64(i64 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_fvw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, float fs1, size_t vl) {
@@ -2237,12 +2237,12 @@ vuint64m1_t test_sf_vc_v_fvw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, float fs1,
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.nxv2i64.i32.nxv2i32.f32.i32(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.nxv2i64.i32.nxv2i64.nxv2i32.f32.i32(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.nxv2i64.i64.nxv2i32.f32.i64(i64 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.nxv2i64.i64.nxv2i64.nxv2i32.f32.i64(i64 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_fvw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, float fs1, size_t vl) {
@@ -2251,12 +2251,12 @@ vuint64m2_t test_sf_vc_v_fvw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, float fs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u32m2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.nxv4i64.i32.nxv4i32.f32.i32(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.nxv4i64.i32.nxv4i64.nxv4i32.f32.i32(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.nxv4i64.i64.nxv4i32.f32.i64(i64 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.nxv4i64.i64.nxv4i64.nxv4i32.f32.i64(i64 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_fvw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, float fs1, size_t vl) {
@@ -2265,12 +2265,12 @@ vuint64m4_t test_sf_vc_v_fvw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, float fs1, s
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u32m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.i32.nxv8i32.f32.i32(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.i32.nxv8i64.nxv8i32.f32.i32(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.i64.nxv8i32.f32.i64(i64 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.i64.nxv8i64.nxv8i32.f32.i64(i64 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_fvw_u32m4(vuint64m8_t vd, vuint32m4_t vs2, float fs1, size_t vl) {
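
For context on the mangled names above: the destination vector type is now an
independent overload parameter, which is why a suffix such as
.nxv8i64.nxv8i32 spells out both the wide destination and the source type. A
minimal usage sketch of the builtin these tests exercise (assuming the
__riscv_sf_vc_* declarations from <sifive_vector.h> and a -march string that
enables xsfvcp):

  #include <sifive_vector.h>

  // Widening VCIX op: vd (u64m8) is updated from the u32m4 source vs2 and
  // the scalar fs1; the leading 1 is the bit<27-26> opcode field.
  vuint64m8_t vcix_fvw(vuint64m8_t vd, vuint32m4_t vs2, float fs1,
                       size_t vl) {
    return __riscv_sf_vc_v_fvw_u32m4(1, vd, vs2, fs1, vl);
  }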
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td b/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
index c8d24ec7d83addf..6cfd54fa0e32302 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
@@ -42,7 +42,7 @@ let TargetPrefix = "riscv" in {
   //        (bit<27-26>, bit<11-7>, vector_in, vector_in/scalar_in, vl)
   class RISCVSFCustomVC_XV<bit HasDst, bit HasSE, bit ImmScalar>
         : Intrinsic<!if(HasDst, [llvm_anyvector_ty], []),
-                    !listconcat(!if(HasDst, [llvm_anyint_ty, LLVMMatchType<0>],
+                    !listconcat(!if(HasDst, [llvm_anyint_ty, llvm_anyvector_ty],
                                             [llvm_anyint_ty, LLVMMatchType<0>, llvm_anyvector_ty]),
                                 [llvm_any_ty, llvm_anyint_ty]),
                     !listconcat([IntrNoMem, ImmArg<ArgIndex<0>>],                        // bit<27-26>
@@ -61,8 +61,8 @@ let TargetPrefix = "riscv" in {
   //        (bit<27-26>, vector_in, vector_in, vector_in/scalar_in, vl)
   class RISCVSFCustomVC_XVV<bit HasDst, bit HasSE, bit ImmScalar>
         : Intrinsic<!if(HasDst, [llvm_anyvector_ty], []),
-                    !listconcat(!if(HasDst, [llvm_anyint_ty, LLVMMatchType<0>, LLVMMatchType<0>],
-                                            [llvm_anyint_ty, llvm_anyvector_ty, LLVMMatchType<1>]),
+                    !listconcat(!if(HasDst, [llvm_anyint_ty, llvm_anyvector_ty, llvm_anyvector_ty],
+                                            [llvm_anyint_ty, llvm_anyvector_ty, llvm_anyvector_ty]),
                                 [llvm_any_ty, llvm_anyint_ty]),
                     !listconcat([IntrNoMem, ImmArg<ArgIndex<0>>],                        // bit<27-26>
                                 !if(ImmScalar, [ImmArg<ArgIndex<3>>], []),               // ScalarOperand
@@ -76,7 +76,7 @@ let TargetPrefix = "riscv" in {
   //        (bit<27-26>, wvector_in, vector_in, vector_in/scalar_in, vl)
   class RISCVSFCustomVC_XVW<bit HasDst, bit HasSE, bit ImmScalar>
         : Intrinsic<!if(HasDst, [llvm_anyvector_ty], []),
-                    !listconcat(!if(HasDst, [llvm_anyint_ty, LLVMMatchType<0>, llvm_anyvector_ty],
+                    !listconcat(!if(HasDst, [llvm_anyint_ty, llvm_anyvector_ty, llvm_anyvector_ty],
                                             [llvm_anyint_ty, llvm_anyvector_ty, llvm_anyvector_ty]),
                                 [llvm_any_ty, llvm_anyint_ty]),
                     !listconcat([IntrNoMem, ImmArg<ArgIndex<0>>],                        // bit<27-26>
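
The key change in this header: operands that previously had to match the
result type (LLVMMatchType<0>) are now independently overloaded
(llvm_anyvector_ty), so the destination and source vector types can be
instantiated separately. A hedged C++ sketch of what that permits; the
overload list mirrors the mangled-name components in the tests above, and
the exact ordering here is an assumption:

  #include "llvm/IR/Intrinsics.h"
  #include "llvm/IR/IntrinsicsRISCV.h"
  #include "llvm/IR/Module.h"
  using namespace llvm;

  // Declare llvm.riscv.sf.vc.v.ivw with independent destination/source
  // vector types, e.g. ...ivw.nxv4i64.i64.nxv4i64.nxv4i32.i64.i64.
  Function *declareVCIvw(Module &M, Type *DstVecTy, Type *SrcVecTy,
                         Type *XLenTy) {
    return Intrinsic::getDeclaration(
        &M, Intrinsic::riscv_sf_vc_v_ivw,
        {DstVecTy, XLenTy, DstVecTy, SrcVecTy, XLenTy, XLenTy});
  }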
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a0965763e3235e4..9b3c1502b165621 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -7992,6 +7992,28 @@ static SDValue lowerGetVectorLength(SDNode *N, SelectionDAG &DAG,
   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, ID, AVL, Sew, LMul);
 }
 
+static void getVCIXOperands(SDValue &Op, SelectionDAG &DAG, SmallVector<SDValue> &Ops) {
+  SDLoc DL(Op);
+
+  const RISCVSubtarget &Subtarget = DAG.getMachineFunction().getSubtarget<RISCVSubtarget>();
+  for (const SDValue &V : Op->op_values()) {
+    EVT ValType = V.getValueType();
+    if (ValType.isFixedLengthVector()) {
+      MVT OpContainerVT =
+          getContainerForFixedLengthVector(DAG, V.getSimpleValueType(), Subtarget);
+      Ops.push_back(convertToScalableVector(OpContainerVT, V, DAG, Subtarget));
+      continue;
+    }
+
+    if (ValType.isScalableVector() && ValType.isFloatingPoint()) {
+      MVT InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(ValType.getScalarSizeInBits()),
+                                        ValType.getVectorElementCount());
+      Ops.push_back(DAG.getNode(ISD::BITCAST, DL, InterimIVT, V));
+    } else
+      Ops.push_back(V);
+  }
+}
+
 // LMUL * VLEN should be greater than or equal to EGS * SEW
 static inline bool isValidEGW(int EGS, EVT VT,
                               const RISCVSubtarget &Subtarget) {
@@ -8178,26 +8200,23 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
   case Intrinsic::riscv_sf_vc_v_fvw: {
     MVT VT = Op.getSimpleValueType();
 
-    if (!VT.isFixedLengthVector())
-      break;
+    SmallVector<SDValue> Ops;
+    getVCIXOperands(Op, DAG, Ops);
 
-    SmallVector<SDValue, 6> Ops;
-    for (const SDValue &V : Op->op_values()) {
-      // Skip non-fixed vector operands.
-      if (!V.getValueType().isFixedLengthVector()) {
-        Ops.push_back(V);
-        continue;
-      }
+    MVT RetVT = VT;
+    if (VT.isFixedLengthVector())
+      RetVT = getContainerForFixedLengthVector(VT);
 
-      MVT OpContainerVT =
-          getContainerForFixedLengthVector(V.getSimpleValueType());
-      Ops.push_back(convertToScalableVector(OpContainerVT, V, DAG, Subtarget));
-    }
+    SDValue NewNode =
+        DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, RetVT, Ops);
+
+    if (VT.isFixedLengthVector())
+      NewNode = convertFromScalableVector(VT, NewNode, DAG, Subtarget);
 
-    MVT RetContainerVT = getContainerForFixedLengthVector(VT);
-    SDValue Scalable =
-        DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, RetContainerVT, Ops);
-    return convertFromScalableVector(VT, Scalable, DAG, Subtarget);
+    if (Op == NewNode)
+      break;
+
+    return NewNode;
   }
   }
 
@@ -8334,30 +8353,27 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
   case Intrinsic::riscv_sf_vc_v_vvw_se:
   case Intrinsic::riscv_sf_vc_v_fvw_se: {
     MVT VT = Op.getSimpleValueType();
+    SDLoc DL(Op);
 
-    if (!VT.isFixedLengthVector())
-      break;
+    SmallVector<SDValue> Ops;
+    getVCIXOperands(Op, DAG, Ops);
 
-    SmallVector<SDValue, 6> Ops;
-    for (const SDValue &V : Op->op_values()) {
-      // Skip non-fixed vector operands.
-      if (!V.getValueType().isFixedLengthVector()) {
-        Ops.push_back(V);
-        continue;
-      }
+    MVT RetVT = VT;
+    if (VT.isFixedLengthVector())
+      RetVT = getContainerForFixedLengthVector(VT);
+    SDVTList VTs = DAG.getVTList({RetVT, MVT::Other});
+    SDValue NewNode = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops);
 
-      MVT OpContainerVT =
-          getContainerForFixedLengthVector(V.getSimpleValueType());
-      Ops.push_back(convertToScalableVector(OpContainerVT, V, DAG, Subtarget));
+    if (VT.isFixedLengthVector()) {
+      SDValue FixedVector =
+          convertFromScalableVector(VT, NewNode, DAG, Subtarget);
+      NewNode = DAG.getMergeValues({FixedVector, NewNode.getValue(1)}, DL);
     }
 
-    SDLoc DL(Op);
-    MVT RetContainerVT = getContainerForFixedLengthVector(VT);
-    SDVTList VTs = DAG.getVTList({RetContainerVT, MVT::Other});
-    SDValue ScalableVector = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops);
-    SDValue FixedVector =
-        convertFromScalableVector(VT, ScalableVector, DAG, Subtarget);
-    return DAG.getMergeValues({FixedVector, ScalableVector.getValue(1)}, DL);
+    if (Op == NewNode)
+      break;
+
+    return NewNode;
   }
   }
 
@@ -8502,25 +8518,15 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
   case Intrinsic::riscv_sf_vc_ivw_se:
   case Intrinsic::riscv_sf_vc_vvw_se:
   case Intrinsic::riscv_sf_vc_fvw_se: {
-    if (!llvm::any_of(Op->op_values(), [&](const SDValue &V) {
-          return V.getValueType().isFixedLengthVector();
-        }))
-      break;
+    SmallVector<SDValue> Ops;
+    getVCIXOperands(Op, DAG, Ops);
 
-    SmallVector<SDValue, 6> Ops;
-    for (const SDValue &V : Op->op_values()) {
-      // Skip non-fixed vector operands.
-      if (!V.getValueType().isFixedLengthVector()) {
-        Ops.push_back(V);
-        continue;
-      }
+    SDValue NewNode = DAG.getNode(ISD::INTRINSIC_VOID, SDLoc(Op), Op->getVTList(), Ops);
 
-      MVT OpContainerVT =
-          getContainerForFixedLengthVector(V.getSimpleValueType());
-      Ops.push_back(convertToScalableVector(OpContainerVT, V, DAG, Subtarget));
-    }
+    if (Op == NewNode)
+      break;
 
-    return DAG.getNode(ISD::INTRINSIC_VOID, SDLoc(Op), Op->getVTList(), Ops);
+    return NewNode;
   }
   }
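
The heart of the lowering change is getVCIXOperands: fixed-length vector
operands are still promoted to scalable containers, and (new here) scalable
floating-point operands are bitcast to equally sized integer vectors. A
self-contained sketch of that type computation (the MVT header lives at
llvm/CodeGen/MachineValueType.h on current trees):

  #include "llvm/CodeGen/MachineValueType.h"
  using namespace llvm;

  // Map a scalable FP vector type to the equal-width integer vector type
  // used as the bitcast target, e.g. nxv4f16 -> nxv4i16.
  static MVT getEquivalentIntegerVT(MVT VT) {
    MVT IntEltVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
    return MVT::getVectorVT(IntEltVT, VT.getVectorElementCount());
  }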
 
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
index fa6a1af6a05e9a5..d6ae35035f605ad 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
@@ -416,6 +416,18 @@ multiclass VPatVC_X<string intrinsic_suffix, string instruction_suffix,
                      "PseudoVC_V_" # instruction_suffix # "_" # vti.LMul.MX,
                      vti.Vector, XLenVT, type, vti.Log2SEW,
                      payload5, kind>;
+  // Add additional patterns for float-typed return values.
+  if !ne(vti.SEW, 8) then {
+    defvar fti = !cast<VTypeInfo>("VF"#vti.SEW#vti.LMul.MX);
+    def : VPatVC_V_OP3<"int_riscv_sf_vc_v_" # intrinsic_suffix # "_se",
+                       "PseudoVC_V_" # instruction_suffix # "_SE_" # vti.LMul.MX,
+                       fti.Vector, XLenVT, type, vti.Log2SEW,
+                       payload5, kind>;
+    def : VPatVC_V_OP3<"int_riscv_sf_vc_v_" # intrinsic_suffix,
+                       "PseudoVC_V_" # instruction_suffix # "_" # vti.LMul.MX,
+                       fti.Vector, XLenVT, type, vti.Log2SEW,
+                       payload5, kind>;
+  }
 }
 
 multiclass VPatVC_XV<string intrinsic_suffix, string instruction_suffix,
@@ -433,6 +445,18 @@ multiclass VPatVC_XV<string intrinsic_suffix, string instruction_suffix,
                      "PseudoVC_V_" # instruction_suffix # "_" # vti.LMul.MX,
                      vti.Vector, vti.Vector, type, vti.Log2SEW,
                      vti.RegClass, kind, op1_kind>;
+  // Add additional patterns for float-typed return values.
+  if !ne(vti.SEW, 8) then {
+    defvar fti = !cast<VTypeInfo>("VF"#vti.SEW#vti.LMul.MX);
+    def : VPatVC_V_OP3<"int_riscv_sf_vc_v_" # intrinsic_suffix # "_se",
+                       "PseudoVC_V_" # instruction_suffix # "_SE_" # vti.LMul.MX,
+                       fti.Vector, vti.Vector, type, vti.Log2SEW,
+                       vti.RegClass, kind, op1_kind>;
+    def : VPatVC_V_OP3<"int_riscv_sf_vc_v_" # intrinsic_suffix,
+                       "PseudoVC_V_" # instruction_suffix # "_" # vti.LMul.MX,
+                       fti.Vector, vti.Vector, type, vti.Log2SEW,
+                       vti.RegClass, kind, op1_kind>;
+  }
 }
 
 multiclass VPatVC_XVV<string intrinsic_suffix, string instruction_suffix,
@@ -450,6 +474,18 @@ multiclass VPatVC_XVV<string intrinsic_suffix, string instruction_suffix,
                      "PseudoVC_V_" # instruction_suffix # "_" # vti.LMul.MX,
                      wti.Vector, wti.Vector, vti.Vector, type, vti.Log2SEW,
                      wti.RegClass, vti.RegClass, kind, op1_kind>;
+  // Add additional patterns for float-typed return values.
+  if !ne(wti.SEW, 8) then {
+    defvar wfti = !cast<VTypeInfo>("VF"#wti.SEW#wti.LMul.MX);
+    def : VPatVC_V_OP4<"int_riscv_sf_vc_v_" # intrinsic_suffix # "_se",
+                       "PseudoVC_V_" # instruction_suffix # "_SE_" # vti.LMul.MX,
+                       wfti.Vector, wti.Vector, vti.Vector, type, vti.Log2SEW,
+                       wti.RegClass, vti.RegClass, kind, op1_kind>;
+    def : VPatVC_V_OP4<"int_riscv_sf_vc_v_" # intrinsic_suffix,
+                       "PseudoVC_V_" # instruction_suffix # "_" # vti.LMul.MX,
+                       wfti.Vector, wti.Vector, vti.Vector, type, vti.Log2SEW,
+                       wti.RegClass, vti.RegClass, kind, op1_kind>;
+  }
 }
 
 class GetFTypeInfo<int Sew> {
@@ -466,7 +502,7 @@ class GetFTypeInfo<int Sew> {
 }
 
 let Predicates = [HasVendorXSfvcp] in {
-  foreach vti = AllVectors in {
+  foreach vti = AllIntegerVectors in {
     defm : VPatVC_X<"x", "X", vti, XLenVT, GPR>;
     defm : VPatVC_X<"i", "I", vti, XLenVT, tsimm5>;
     defm : VPatVC_XV<"xv", "XV", vti, XLenVT, GPR>;
@@ -484,7 +520,7 @@ let Predicates = [HasVendorXSfvcp] in {
                         finfo.ScalarRegClass, payload1>;
     }
   }
-  foreach VtiToWti = !listconcat(AllWidenableIntVectors, AllWidenableFloatVectors) in {
+  foreach VtiToWti = AllWidenableIntVectors in {
     defvar vti = VtiToWti.Vti;
     defvar wti = VtiToWti.Wti;
     defvar iinfo = GetIntVTypeInfo<vti>.Vti;
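
These extra VPatVC_* patterns exist because selection can now see a
floating-point result type while the pseudo itself stays type-agnostic: a
bitcast between equally sized integer and FP vectors costs nothing at the
register level, so one pseudo per SEW/LMUL pair serves both. A quick,
hypothetical check of that size identity using the same MVT API:

  #include "llvm/CodeGen/MachineValueType.h"
  #include <cassert>
  using namespace llvm;

  int main() {
    // nxv4f32 and nxv4i32 occupy identical registers, so patterns that
    // differ only in the result element type can share PseudoVC_V_*.
    assert(MVT(MVT::nxv4f32).getSizeInBits() ==
           MVT(MVT::nxv4i32).getSizeInBits());
    return 0;
  }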

>From 959e41d2920e265208b731f9eae286d8dad2ea17 Mon Sep 17 00:00:00 2001
From: Brandon Wu <brandon.wu at sifive.com>
Date: Wed, 18 Oct 2023 21:19:01 -0700
Subject: [PATCH 2/2] fixup! [RISCV] Convert all floating point vector type
 operands to integer vector type

---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 9b3c1502b165621..bb25ed9f5756b38 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -7992,22 +7992,25 @@ static SDValue lowerGetVectorLength(SDNode *N, SelectionDAG &DAG,
   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, ID, AVL, Sew, LMul);
 }
 
-static void getVCIXOperands(SDValue &Op, SelectionDAG &DAG, SmallVector<SDValue> &Ops) {
+static void getVCIXOperands(SDValue &Op, SelectionDAG &DAG,
+                            SmallVector<SDValue> &Ops) {
   SDLoc DL(Op);
 
-  const RISCVSubtarget &Subtarget = DAG.getMachineFunction().getSubtarget<RISCVSubtarget>();
+  const RISCVSubtarget &Subtarget =
+      DAG.getMachineFunction().getSubtarget<RISCVSubtarget>();
   for (const SDValue &V : Op->op_values()) {
     EVT ValType = V.getValueType();
     if (ValType.isFixedLengthVector()) {
-      MVT OpContainerVT =
-          getContainerForFixedLengthVector(DAG, V.getSimpleValueType(), Subtarget);
+      MVT OpContainerVT = getContainerForFixedLengthVector(
+          DAG, V.getSimpleValueType(), Subtarget);
       Ops.push_back(convertToScalableVector(OpContainerVT, V, DAG, Subtarget));
       continue;
     }
 
     if (ValType.isScalableVector() && ValType.isFloatingPoint()) {
-      MVT InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(ValType.getScalarSizeInBits()),
-                                        ValType.getVectorElementCount());
+      MVT InterimIVT =
+          MVT::getVectorVT(MVT::getIntegerVT(ValType.getScalarSizeInBits()),
+                           ValType.getVectorElementCount());
       Ops.push_back(DAG.getNode(ISD::BITCAST, DL, InterimIVT, V));
     } else
       Ops.push_back(V);
@@ -8207,8 +8210,7 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     if (VT.isFixedLengthVector())
       RetVT = getContainerForFixedLengthVector(VT);
 
-    SDValue NewNode =
-        DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, RetVT, Ops);
+    SDValue NewNode = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, RetVT, Ops);
 
     if (VT.isFixedLengthVector())
       NewNode = convertFromScalableVector(VT, NewNode, DAG, Subtarget);
@@ -8521,7 +8523,8 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
     SmallVector<SDValue> Ops;
     getVCIXOperands(Op, DAG, Ops);
 
-    SDValue NewNode = DAG.getNode(ISD::INTRINSIC_VOID, SDLoc(Op), Op->getVTList(), Ops);
+    SDValue NewNode =
+        DAG.getNode(ISD::INTRINSIC_VOID, SDLoc(Op), Op->getVTList(), Ops);
 
     if (Op == NewNode)
       break;


