[llvm] 408b081 - [RISCV] Support floating point VCIX (#67094)

via llvm-commits llvm-commits at lists.llvm.org
Sun Sep 24 22:19:26 PDT 2023


Author: Brandon Wu
Date: 2023-09-25T13:19:21+08:00
New Revision: 408b0810ba7c637236e1d1d9eccf0c71daede5a9

URL: https://github.com/llvm/llvm-project/commit/408b0810ba7c637236e1d1d9eccf0c71daede5a9
DIFF: https://github.com/llvm/llvm-project/commit/408b0810ba7c637236e1d1d9eccf0c71daede5a9.diff

LOG: [RISCV] Support floating point VCIX (#67094)
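
This patch extends the XSfvcp (SiFive VCIX) patterns so the sf.vc.*
intrinsics select directly on floating-point vector types (f16/f32/f64
element widths); previously only integer vector types were handled, and the
fv/fvv/fvw forms were routed through the equivalent integer vector types.
As a minimal sketch only (assuming RV64, so the tests' iXLen placeholder
becomes i64, and with an arbitrarily chosen opcode of 1), a call of the fv
form on a float vector now looks like:

  ; sketch: sf.vc.v.fv selected on a float vector type
  define <vscale x 2 x float> @fv_sketch(<vscale x 2 x float> %vs2, float %fs1, i64 %vl) {
  entry:
    %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.se.nxv2f32.i64.f32.i64(i64 1, <vscale x 2 x float> %vs2, float %fs1, i64 %vl)
    ret <vscale x 2 x float> %0
  }

  declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.se.nxv2f32.i64.f32.i64(i64, <vscale x 2 x float>, float, i64)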

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
    llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll
    llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll
    llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll
    llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
index a64fc3403335d23..3975b8426256ac7 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
@@ -463,37 +463,51 @@ multiclass VPatVC_XVV<string intrinsic_suffix, string instruction_suffix,
                      wti.RegClass, vti.RegClass, kind, op1_kind>;
 }
 
+class GetFTypeInfo<int Sew> {
+  ValueType Scalar = !cond(!eq(Sew, 16) : f16,
+                           !eq(Sew, 32) : f32,
+                           !eq(Sew, 64) : f64);
+  RegisterClass ScalarRegClass = !cond(!eq(Sew, 16) : FPR16,
+                                       !eq(Sew, 32) : FPR32,
+                                       !eq(Sew, 64) : FPR64);
+
+  string ScalarSuffix = !cond(!eq(Scalar, f16) : "FPR16",
+                              !eq(Scalar, f32) : "FPR32",
+                              !eq(Scalar, f64) : "FPR64");
+}
+
 let Predicates = [HasVendorXSfvcp] in {
-  foreach vti = AllIntegerVectors in {
-    defm : VPatVC_X<"x", "X", vti, vti.Scalar, vti.ScalarRegClass>;
+  foreach vti = AllVectors in {
+    defm : VPatVC_X<"x", "X", vti, XLenVT, GPR>;
     defm : VPatVC_X<"i", "I", vti, XLenVT, tsimm5>;
-    defm : VPatVC_XV<"xv", "XV", vti, vti.Scalar, vti.ScalarRegClass>;
+    defm : VPatVC_XV<"xv", "XV", vti, XLenVT, GPR>;
     defm : VPatVC_XV<"iv", "IV", vti, XLenVT, tsimm5>;
     defm : VPatVC_XV<"vv", "VV", vti, vti.Vector, vti.RegClass>;
-    defm : VPatVC_XVV<"xvv", "XVV", vti, vti, vti.Scalar, vti.ScalarRegClass>;
+    defm : VPatVC_XVV<"xvv", "XVV", vti, vti, XLenVT, GPR>;
     defm : VPatVC_XVV<"ivv", "IVV", vti, vti, XLenVT, tsimm5>;
     defm : VPatVC_XVV<"vvv", "VVV", vti, vti, vti.Vector, vti.RegClass>;
+
+    if !ne(vti.SEW, 8) then {
+      defvar finfo = GetFTypeInfo<vti.SEW>;
+      defm : VPatVC_XV<"fv", finfo.ScalarSuffix # "V", vti, finfo.Scalar,
+                       finfo.ScalarRegClass, payload1>;
+      defm : VPatVC_XVV<"fvv", finfo.ScalarSuffix # "VV", vti, vti, finfo.Scalar,
+                        finfo.ScalarRegClass, payload1>;
+    }
   }
-  foreach fvti = AllFloatVectors in {
-    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
-    defm : VPatVC_XV<"fv", fvti.ScalarSuffix # "V", ivti, fvti.Scalar,
-                     fvti.ScalarRegClass, payload1>;
-    defm : VPatVC_XVV<"fvv", fvti.ScalarSuffix # "VV", ivti, ivti, fvti.Scalar,
-                      fvti.ScalarRegClass, payload1>;
-  }
-  foreach VtiToWti = AllWidenableIntVectors in {
+  foreach VtiToWti = !listconcat(AllWidenableIntVectors, AllWidenableFloatVectors) in {
     defvar vti = VtiToWti.Vti;
     defvar wti = VtiToWti.Wti;
-    defm : VPatVC_XVV<"xvw", "XVW", wti, vti, vti.Scalar, vti.ScalarRegClass>;
+    defvar iinfo = GetIntVTypeInfo<vti>.Vti;
+    defm : VPatVC_XVV<"xvw", "XVW", wti, vti, iinfo.Scalar, iinfo.ScalarRegClass>;
     defm : VPatVC_XVV<"ivw", "IVW", wti, vti, XLenVT, tsimm5>;
     defm : VPatVC_XVV<"vvw", "VVW", wti, vti, vti.Vector, vti.RegClass>;
-  }
-  foreach VtiToWti = AllWidenableFloatVectors in {
-    defvar fvti = VtiToWti.Vti;
-    defvar iwti = GetIntVTypeInfo<VtiToWti.Wti>.Vti;
-    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
-    defm : VPatVC_XVV<"fvw", fvti.ScalarSuffix # "VW", iwti, ivti, fvti.Scalar,
-                      fvti.ScalarRegClass, payload1>;
+
+    if !ne(vti.SEW, 8) then {
+      defvar finfo = GetFTypeInfo<vti.SEW>;
+      defm : VPatVC_XVV<"fvw", finfo.ScalarSuffix # "VW", wti, vti, finfo.Scalar,
+                        finfo.ScalarRegClass, payload1>;
+    }
   }
 }
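
The GetFTypeInfo<Sew> helper above maps an element width to the matching
scalar FP ValueType, register class, and FPR suffix string, so the fv/fvv
(and fvw) patterns can be defined in the same foreach loops as the integer
forms; SEW==8 is skipped because there is no 8-bit FP type. By analogy with
the xvv declarations elsewhere in this patch, a hedged sketch of the fvv
form on half vectors (again assuming RV64 and an arbitrary opcode of 1):

  ; sketch: sf.vc.v.fvv with an FPR16 scalar operand on a half vector
  define <vscale x 4 x half> @fvv_sketch(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, half %fs1, i64 %vl) {
  entry:
    %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv4f16.i64.nxv4f16.f16.i64(i64 1, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, half %fs1, i64 %vl)
    ret <vscale x 4 x half> %0
  }

  declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv4f16.i64.nxv4f16.f16.i64(i64, <vscale x 4 x half>, <vscale x 4 x half>, half, i64)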
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll
index 250a8bd1df962f2..59124ed8179414c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN:  sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+xsfvcp \
+; RUN:  sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+xsfvcp,+zvfh \
 ; RUN:    -verify-machineinstrs | FileCheck %s
-; RUN:  sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvcp \
+; RUN:  sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvcp,+zvfh \
 ; RUN:    -verify-machineinstrs | FileCheck %s
 
 define void @test_sf_vc_x_se_e8mf8(i8 zeroext %rs1, iXLen %vl) {
@@ -1563,3 +1563,393 @@ entry:
 }
 
 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.i.nxv8i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_i_se_e16mf4(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.i.se.nxv1f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.i.se.nxv1f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_i_se_e16mf2(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.i.se.nxv2f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.i.se.nxv2f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_i_se_e16m1(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.i.se.nxv4f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.i.se.nxv4f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_i_se_e16m2(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.i.se.nxv8f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.i.se.nxv8f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_i_se_e16m4(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.i.se.nxv16f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.i.se.nxv16f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_i_se_e16m8(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.i.se.nxv32f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.i.se.nxv32f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_i_se_e32mf2(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.i.se.nxv1f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.i.se.nxv1f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_i_se_e32m1(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.i.se.nxv2f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.i.se.nxv2f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_i_se_e32m2(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.i.se.nxv4f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.i.se.nxv4f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_i_se_e32m4(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.i.se.nxv8f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.i.se.nxv8f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_i_se_e32m8(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.i.se.nxv16f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.i.se.nxv16f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_i_se_e64m1(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.i.se.nxv1f64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.i.se.nxv1f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_i_se_e64m2(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.i.se.nxv2f64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.i.se.nxv2f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_i_se_e64m4(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.i.se.nxv4f64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.i.se.nxv4f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_i_se_e64m8(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.i.se.nxv8f64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.i.se.nxv8f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_i_e16mf4(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.i.nxv1f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.i.nxv1f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_i_e16mf2(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.i.nxv2f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.i.nxv2f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_i_e16m1(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.i.nxv4f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.i.nxv4f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_i_e16m2(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.i.nxv8f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.i.nxv8f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_i_e16m4(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.i.nxv16f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.i.nxv16f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_i_e16m8(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.i.nxv32f16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.i.nxv32f16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_i_e32mf2(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.i.nxv1f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.i.nxv1f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_i_e32m1(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.i.nxv2f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.i.nxv2f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_i_e32m2(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.i.nxv4f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.i.nxv4f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_i_e32m4(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.i.nxv8f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.i.nxv8f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_i_e32m8(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.i.nxv16f32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.i.nxv16f32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_i_e64m1(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.i.nxv1f64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.i.nxv1f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_i_e64m2(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.i.nxv2f64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.i.nxv2f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_i_e64m4(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.i.nxv4f64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.i.nxv4f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_i_e64m8(iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_i_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.i.nxv8f64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.i.nxv8f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)

diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll
index d4063180c63e27c..91ba4242123299f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN:  sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+xsfvcp \
+; RUN:  sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+xsfvcp \
 ; RUN:    -verify-machineinstrs | FileCheck %s
-; RUN:  sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+xsfvcp \
+; RUN:  sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+xsfvcp \
 ; RUN:    -verify-machineinstrs | FileCheck %s
 
 define void @test_sf_vc_vv_se_e8mf8(<vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
@@ -3006,3 +3006,2187 @@ entry:
 }
 
 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.nxv8i64.iXLen.f64.iXLen(iXLen, <vscale x 8 x i64>, double, iXLen)
+
+define void @test_f_sf_vc_vv_se_e16mf4(<vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f16.nxv1f16.iXLen(iXLen 3, iXLen 31, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f16.nxv1f16.iXLen(iXLen, iXLen, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e16mf2(<vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f16.nxv2f16.iXLen(iXLen 3, iXLen 31, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f16.nxv2f16.iXLen(iXLen, iXLen, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e16m1(<vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f16.nxv4f16.iXLen(iXLen 3, iXLen 31, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f16.nxv4f16.iXLen(iXLen, iXLen, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e16m2(<vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f16.nxv8f16.iXLen(iXLen 3, iXLen 31, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f16.nxv8f16.iXLen(iXLen, iXLen, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e16m4(<vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f16.nxv16f16.iXLen(iXLen 3, iXLen 31, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f16.nxv16f16.iXLen(iXLen, iXLen, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e16m8(<vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32f16.nxv32f16.iXLen(iXLen 3, iXLen 31, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32f16.nxv32f16.iXLen(iXLen, iXLen, <vscale x 32 x half>, <vscale x 32 x half>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e32mf2(<vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f32.nxv1f32.iXLen(iXLen 3, iXLen 31, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f32.nxv1f32.iXLen(iXLen, iXLen, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e32m1(<vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f32.nxv2f32.iXLen(iXLen 3, iXLen 31, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f32.nxv2f32.iXLen(iXLen, iXLen, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e32m2(<vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f32.nxv4f32.iXLen(iXLen 3, iXLen 31, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f32.nxv4f32.iXLen(iXLen, iXLen, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e32m4(<vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f32.nxv8f32.iXLen(iXLen 3, iXLen 31, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f32.nxv8f32.iXLen(iXLen, iXLen, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e32m8(<vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f32.nxv16f32.iXLen(iXLen 3, iXLen 31, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f32.nxv16f32.iXLen(iXLen, iXLen, <vscale x 16 x float>, <vscale x 16 x float>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e64m1(<vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f64.nxv1f64.iXLen(iXLen 3, iXLen 31, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f64.nxv1f64.iXLen(iXLen, iXLen, <vscale x 1 x double>, <vscale x 1 x double>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e64m2(<vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f64.nxv2f64.iXLen(iXLen 3, iXLen 31, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f64.nxv2f64.iXLen(iXLen, iXLen, <vscale x 2 x double>, <vscale x 2 x double>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e64m4(<vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f64.nxv4f64.iXLen(iXLen 3, iXLen 31, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f64.nxv4f64.iXLen(iXLen, iXLen, <vscale x 4 x double>, <vscale x 4 x double>, iXLen)
+
+define void @test_f_sf_vc_vv_se_e64m8(<vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f64.nxv8f64.iXLen(iXLen 3, iXLen 31, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f64.nxv8f64.iXLen(iXLen, iXLen, <vscale x 8 x double>, <vscale x 8 x double>, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_vv_se_e16mf4(<vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vv.se.nxv1f16.iXLen.nxv1f16.iXLen(iXLen 3, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.vv.se.nxv1f16.iXLen.nxv1f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_vv_se_e16mf2(<vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vv.se.nxv2f16.iXLen.nxv2f16.iXLen(iXLen 3, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.vv.se.nxv2f16.iXLen.nxv2f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_vv_se_e16m1(<vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vv.se.nxv4f16.iXLen.nxv4f16.iXLen(iXLen 3, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.vv.se.nxv4f16.iXLen.nxv4f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_vv_se_e16m2(<vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vv.se.nxv8f16.iXLen.nxv8f16.iXLen(iXLen 3, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.vv.se.nxv8f16.iXLen.nxv8f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_vv_se_e16m4(<vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vv.se.nxv16f16.iXLen.nxv16f16.iXLen(iXLen 3, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.vv.se.nxv16f16.iXLen.nxv16f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_vv_se_e16m8(<vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vv.se.nxv32f16.iXLen.nxv32f16.iXLen(iXLen 3, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.vv.se.nxv32f16.iXLen.nxv32f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_vv_se_e32mf2(<vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vv.se.nxv1f32.iXLen.nxv1f32.iXLen(iXLen 3, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.vv.se.nxv1f32.iXLen.nxv1f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_vv_se_e32m1(<vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vv.se.nxv2f32.iXLen.nxv2f32.iXLen(iXLen 3, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.vv.se.nxv2f32.iXLen.nxv2f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_vv_se_e32m2(<vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vv.se.nxv4f32.iXLen.nxv4f32.iXLen(iXLen 3, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.vv.se.nxv4f32.iXLen.nxv4f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_vv_se_e32m4(<vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vv.se.nxv8f32.iXLen.nxv8f32.iXLen(iXLen 3, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.vv.se.nxv8f32.iXLen.nxv8f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_vv_se_e32m8(<vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vv.se.nxv16f32.iXLen.nxv16f32.iXLen(iXLen 3, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.vv.se.nxv16f32.iXLen.nxv16f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_vv_se_e64m1(<vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vv.se.nxv1f64.iXLen.nxv1f64.iXLen(iXLen 3, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.vv.se.nxv1f64.iXLen.nxv1f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_vv_se_e64m2(<vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vv.se.nxv2f64.iXLen.nxv2f64.iXLen(iXLen 3, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.vv.se.nxv2f64.iXLen.nxv2f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_vv_se_e64m4(<vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vv.se.nxv4f64.iXLen.nxv4f64.iXLen(iXLen 3, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.vv.se.nxv4f64.iXLen.nxv4f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_vv_se_e64m8(<vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vv.se.nxv8f64.iXLen.nxv8f64.iXLen(iXLen 3, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.vv.se.nxv8f64.iXLen.nxv8f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_vv_e16mf4(<vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vv.nxv1f16.iXLen.nxv1f16.iXLen(iXLen 3, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.vv.nxv1f16.iXLen.nxv1f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_vv_e16mf2(<vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vv.nxv2f16.iXLen.nxv2f16.iXLen(iXLen 3, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.vv.nxv2f16.iXLen.nxv2f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_vv_e16m1(<vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vv.nxv4f16.iXLen.nxv4f16.iXLen(iXLen 3, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.vv.nxv4f16.iXLen.nxv4f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_vv_e16m2(<vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vv.nxv8f16.iXLen.nxv8f16.iXLen(iXLen 3, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.vv.nxv8f16.iXLen.nxv8f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_vv_e16m4(<vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vv.nxv16f16.iXLen.nxv16f16.iXLen(iXLen 3, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.vv.nxv16f16.iXLen.nxv16f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_vv_e16m8(<vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vv.nxv32f16.iXLen.nxv32f16.iXLen(iXLen 3, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.vv.nxv32f16.iXLen.nxv32f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_vv_e32mf2(<vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vv.nxv1f32.iXLen.nxv1f32.iXLen(iXLen 3, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.vv.nxv1f32.iXLen.nxv1f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_vv_e32m1(<vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vv.nxv2f32.iXLen.nxv2f32.iXLen(iXLen 3, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.vv.nxv2f32.iXLen.nxv2f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_vv_e32m2(<vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vv.nxv4f32.iXLen.nxv4f32.iXLen(iXLen 3, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.vv.nxv4f32.iXLen.nxv4f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_vv_e32m4(<vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vv.nxv8f32.iXLen.nxv8f32.iXLen(iXLen 3, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.vv.nxv8f32.iXLen.nxv8f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_vv_e32m8(<vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vv.nxv16f32.iXLen.nxv16f32.iXLen(iXLen 3, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.vv.nxv16f32.iXLen.nxv16f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_vv_e64m1(<vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vv.nxv1f64.iXLen.nxv1f64.iXLen(iXLen 3, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.vv.nxv1f64.iXLen.nxv1f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_vv_e64m2(<vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vv.nxv2f64.iXLen.nxv2f64.iXLen(iXLen 3, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.vv.nxv2f64.iXLen.nxv2f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_vv_e64m4(<vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vv.nxv4f64.iXLen.nxv4f64.iXLen(iXLen 3, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.vv.nxv4f64.iXLen.nxv4f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_vv_e64m8(<vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vv_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vv.nxv8f64.iXLen.nxv8f64.iXLen(iXLen 3, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.vv.nxv8f64.iXLen.nxv8f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, iXLen)
+
+define void @test_f_sf_vc_xv_se_e16mf4(<vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f16.i16.iXLen(iXLen, iXLen, <vscale x 1 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xv_se_e16mf2(<vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f16.i16.iXLen(iXLen, iXLen, <vscale x 2 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xv_se_e16m1(<vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f16.i16.iXLen(iXLen, iXLen, <vscale x 4 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xv_se_e16m2(<vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f16.i16.iXLen(iXLen, iXLen, <vscale x 8 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xv_se_e16m4(<vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f16.i16.iXLen(iXLen, iXLen, <vscale x 16 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xv_se_e16m8(<vscale x 32 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32f16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 32 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32f16.i16.iXLen(iXLen, iXLen, <vscale x 32 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xv_se_e32mf2(<vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f32.i32.iXLen(iXLen, iXLen, <vscale x 1 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_xv_se_e32m1(<vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f32.i32.iXLen(iXLen, iXLen, <vscale x 2 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_xv_se_e32m2(<vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f32.i32.iXLen(iXLen, iXLen, <vscale x 4 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_xv_se_e32m4(<vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f32.i32.iXLen(iXLen, iXLen, <vscale x 8 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_xv_se_e32m8(<vscale x 16 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 16 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f32.i32.iXLen(iXLen, iXLen, <vscale x 16 x float>, i32, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_xv_se_e16mf4(<vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.xv.se.nxv1f16.iXLen.i16.iXLen(iXLen 3, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.xv.se.nxv1f16.iXLen.i16.iXLen(iXLen, <vscale x 1 x half>, i16, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_xv_se_e16mf2(<vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.xv.se.nxv2f16.iXLen.i16.iXLen(iXLen 3, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.xv.se.nxv2f16.iXLen.i16.iXLen(iXLen, <vscale x 2 x half>, i16, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_xv_se_e16m1(<vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.xv.se.nxv4f16.iXLen.i16.iXLen(iXLen 3, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.xv.se.nxv4f16.iXLen.i16.iXLen(iXLen, <vscale x 4 x half>, i16, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_xv_se_e16m2(<vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.xv.se.nxv8f16.iXLen.i16.iXLen(iXLen 3, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.xv.se.nxv8f16.iXLen.i16.iXLen(iXLen, <vscale x 8 x half>, i16, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_xv_se_e16m4(<vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.xv.se.nxv16f16.iXLen.i16.iXLen(iXLen 3, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.xv.se.nxv16f16.iXLen.i16.iXLen(iXLen, <vscale x 16 x half>, i16, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_xv_se_e16m8(<vscale x 32 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.xv.se.nxv32f16.iXLen.i16.iXLen(iXLen 3, <vscale x 32 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.xv.se.nxv32f16.iXLen.i16.iXLen(iXLen, <vscale x 32 x half>, i16, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_xv_se_e32mf2(<vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xv.se.nxv1f32.i32.f32.iXLen(iXLen 3, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.xv.se.nxv1f32.i32.f32.iXLen(iXLen, <vscale x 1 x float>, i32, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_xv_se_e32m1(<vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xv.se.nxv2f32.i32.f32.iXLen(iXLen 3, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.xv.se.nxv2f32.i32.f32.iXLen(iXLen, <vscale x 2 x float>, i32, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_xv_se_e32m2(<vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xv.se.nxv4f32.i32.f32.iXLen(iXLen 3, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.xv.se.nxv4f32.i32.f32.iXLen(iXLen, <vscale x 4 x float>, i32, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_xv_se_e32m4(<vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xv.se.nxv8f32.i32.f32.iXLen(iXLen 3, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.xv.se.nxv8f32.i32.f32.iXLen(iXLen, <vscale x 8 x float>, i32, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_xv_se_e32m8(<vscale x 16 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xv.se.nxv16f32.i32.f32.iXLen(iXLen 3, <vscale x 16 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.xv.se.nxv16f32.i32.f32.iXLen(iXLen, <vscale x 16 x float>, i32, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_xv_e16mf4(<vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.xv.nxv1f16.iXLen.i16.iXLen(iXLen 3, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.xv.nxv1f16.iXLen.i16.iXLen(iXLen, <vscale x 1 x half>, i16, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_xv_e16mf2(<vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.xv.nxv2f16.iXLen.i16.iXLen(iXLen 3, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.xv.nxv2f16.iXLen.i16.iXLen(iXLen, <vscale x 2 x half>, i16, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_xv_e16m1(<vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.xv.nxv4f16.iXLen.i16.iXLen(iXLen 3, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.xv.nxv4f16.iXLen.i16.iXLen(iXLen, <vscale x 4 x half>, i16, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_xv_e16m2(<vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.xv.nxv8f16.iXLen.i16.iXLen(iXLen 3, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.xv.nxv8f16.iXLen.i16.iXLen(iXLen, <vscale x 8 x half>, i16, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_xv_e16m4(<vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.xv.nxv16f16.iXLen.i16.iXLen(iXLen 3, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.xv.nxv16f16.iXLen.i16.iXLen(iXLen, <vscale x 16 x half>, i16, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_xv_e16m8(<vscale x 32 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.xv.nxv32f16.iXLen.i16.iXLen(iXLen 3, <vscale x 32 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.xv.nxv32f16.iXLen.i16.iXLen(iXLen, <vscale x 32 x half>, i16, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_xv_e32mf2(<vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xv.nxv1f32.i32.f32.iXLen(iXLen 3, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.xv.nxv1f32.i32.f32.iXLen(iXLen, <vscale x 1 x float>, i32, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_xv_e32m1(<vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xv.nxv2f32.i32.f32.iXLen(iXLen 3, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.xv.nxv2f32.i32.f32.iXLen(iXLen, <vscale x 2 x float>, i32, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_xv_e32m2(<vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xv.nxv4f32.i32.f32.iXLen(iXLen 3, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.xv.nxv4f32.i32.f32.iXLen(iXLen, <vscale x 4 x float>, i32, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_xv_e32m4(<vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xv.nxv8f32.i32.f32.iXLen(iXLen 3, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.xv.nxv8f32.i32.f32.iXLen(iXLen, <vscale x 8 x float>, i32, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_xv_e32m8(<vscale x 16 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xv_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xv.nxv16f32.i32.f32.iXLen(iXLen 3, <vscale x 16 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.xv.nxv16f32.i32.f32.iXLen(iXLen, <vscale x 16 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_iv_se_e16mf4(<vscale x 1 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f16.iXLen.iXLen(iXLen, iXLen, <vscale x 1 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e16mf2(<vscale x 2 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f16.iXLen.iXLen(iXLen, iXLen, <vscale x 2 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e16m1(<vscale x 4 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f16.iXLen.iXLen(iXLen, iXLen, <vscale x 4 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e16m2(<vscale x 8 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f16.iXLen.iXLen(iXLen, iXLen, <vscale x 8 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e16m4(<vscale x 16 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f16.iXLen.iXLen(iXLen, iXLen, <vscale x 16 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e16m8(<vscale x 32 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32f16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 32 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32f16.iXLen.iXLen(iXLen, iXLen, <vscale x 32 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e32mf2(<vscale x 1 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f32.iXLen.iXLen(iXLen, iXLen, <vscale x 1 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e32m1(<vscale x 2 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f32.iXLen.iXLen(iXLen, iXLen, <vscale x 2 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e32m2(<vscale x 4 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f32.iXLen.iXLen(iXLen, iXLen, <vscale x 4 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e32m4(<vscale x 8 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f32.iXLen.iXLen(iXLen, iXLen, <vscale x 8 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e32m8(<vscale x 16 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 16 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f32.iXLen.iXLen(iXLen, iXLen, <vscale x 16 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e64m1(<vscale x 1 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f64.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 1 x double> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f64.iXLen.iXLen(iXLen, iXLen, <vscale x 1 x double>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e64m2(<vscale x 2 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f64.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 2 x double> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f64.iXLen.iXLen(iXLen, iXLen, <vscale x 2 x double>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e64m4(<vscale x 4 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f64.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 4 x double> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f64.iXLen.iXLen(iXLen, iXLen, <vscale x 4 x double>, iXLen, iXLen)
+
+define void @test_f_sf_vc_iv_se_e64m8(<vscale x 8 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_iv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f64.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 8 x double> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f64.iXLen.iXLen(iXLen, iXLen, <vscale x 8 x double>, iXLen, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_iv_se_e16mf4(<vscale x 1 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.iv.se.nxv1f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.iv.se.nxv1f16.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x half>, iXLen, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_iv_se_e16mf2(<vscale x 2 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.iv.se.nxv2f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.iv.se.nxv2f16.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x half>, iXLen, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_iv_se_e16m1(<vscale x 4 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.iv.se.nxv4f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.iv.se.nxv4f16.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x half>, iXLen, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_iv_se_e16m2(<vscale x 8 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.iv.se.nxv8f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.iv.se.nxv8f16.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x half>, iXLen, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_iv_se_e16m4(<vscale x 16 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.iv.se.nxv16f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.iv.se.nxv16f16.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x half>, iXLen, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_iv_se_e16m8(<vscale x 32 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.iv.se.nxv32f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.iv.se.nxv32f16.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x half>, iXLen, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_iv_se_e32mf2(<vscale x 1 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.iv.se.nxv1f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.iv.se.nxv1f32.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x float>, iXLen, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_iv_se_e32m1(<vscale x 2 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.iv.se.nxv2f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.iv.se.nxv2f32.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x float>, iXLen, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_iv_se_e32m2(<vscale x 4 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.iv.se.nxv4f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.iv.se.nxv4f32.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x float>, iXLen, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_iv_se_e32m4(<vscale x 8 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.iv.se.nxv8f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.iv.se.nxv8f32.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x float>, iXLen, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_iv_se_e32m8(<vscale x 16 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.iv.se.nxv16f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.iv.se.nxv16f32.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x float>, iXLen, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_iv_se_e64m1(<vscale x 1 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.iv.se.nxv1f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.iv.se.nxv1f64.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x double>, iXLen, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_iv_se_e64m2(<vscale x 2 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.iv.se.nxv2f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.iv.se.nxv2f64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x double>, iXLen, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_iv_se_e64m4(<vscale x 4 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.iv.se.nxv4f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.iv.se.nxv4f64.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x double>, iXLen, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_iv_se_e64m8(<vscale x 8 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.iv.se.nxv8f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.iv.se.nxv8f64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x double>, iXLen, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_iv_e16mf4(<vscale x 1 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.iv.nxv1f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.iv.nxv1f16.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x half>, iXLen, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_iv_e16mf2(<vscale x 2 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.iv.nxv2f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.iv.nxv2f16.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x half>, iXLen, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_iv_e16m1(<vscale x 4 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.iv.nxv4f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.iv.nxv4f16.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x half>, iXLen, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_iv_e16m2(<vscale x 8 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.iv.nxv8f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.iv.nxv8f16.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x half>, iXLen, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_iv_e16m4(<vscale x 16 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.iv.nxv16f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.iv.nxv16f16.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x half>, iXLen, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_iv_e16m8(<vscale x 32 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.iv.nxv32f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.iv.nxv32f16.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x half>, iXLen, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_iv_e32mf2(<vscale x 1 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.iv.nxv1f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.iv.nxv1f32.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x float>, iXLen, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_iv_e32m1(<vscale x 2 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.iv.nxv2f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.iv.nxv2f32.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x float>, iXLen, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_iv_e32m2(<vscale x 4 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.iv.nxv4f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.iv.nxv4f32.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x float>, iXLen, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_iv_e32m4(<vscale x 8 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.iv.nxv8f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.iv.nxv8f32.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x float>, iXLen, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_iv_e32m8(<vscale x 16 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.iv.nxv16f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.iv.nxv16f32.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x float>, iXLen, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_iv_e64m1(<vscale x 1 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.iv.nxv1f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.iv.nxv1f64.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x double>, iXLen, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_iv_e64m2(<vscale x 2 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.iv.nxv2f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.iv.nxv2f64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x double>, iXLen, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_iv_e64m4(<vscale x 4 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.iv.nxv4f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.iv.nxv4f64.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x double>, iXLen, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_iv_e64m8(<vscale x 8 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_iv_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.iv.nxv8f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.iv.nxv8f64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x double>, iXLen, iXLen)
+
+define void @test_f_sf_vc_fv_se_e16mf4(<vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f16.f16.iXLen(iXLen, iXLen, <vscale x 1 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fv_se_e16mf2(<vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f16.f16.iXLen(iXLen, iXLen, <vscale x 2 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fv_se_e16m1(<vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f16.f16.iXLen(iXLen, iXLen, <vscale x 4 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fv_se_e16m2(<vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f16.f16.iXLen(iXLen, iXLen, <vscale x 8 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fv_se_e16m4(<vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f16.f16.iXLen(iXLen, iXLen, <vscale x 16 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fv_se_e16m8(<vscale x 32 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32f16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32f16.f16.iXLen(iXLen, iXLen, <vscale x 32 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fv_se_e32mf2(<vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f32.f32.iXLen(iXLen, iXLen, <vscale x 1 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fv_se_e32m1(<vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f32.f32.iXLen(iXLen, iXLen, <vscale x 2 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fv_se_e32m2(<vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f32.f32.iXLen(iXLen, iXLen, <vscale x 4 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fv_se_e32m4(<vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f32.f32.iXLen(iXLen, iXLen, <vscale x 8 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fv_se_e32m8(<vscale x 16 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f32.f32.iXLen(iXLen, iXLen, <vscale x 16 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fv_se_e64m1(<vscale x 1 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f64.f64.iXLen(iXLen 1, iXLen 31, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f64.f64.iXLen(iXLen, iXLen, <vscale x 1 x double>, double, iXLen)
+
+define void @test_f_sf_vc_fv_se_e64m2(<vscale x 2 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f64.f64.iXLen(iXLen 1, iXLen 31, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f64.f64.iXLen(iXLen, iXLen, <vscale x 2 x double>, double, iXLen)
+
+define void @test_f_sf_vc_fv_se_e64m4(<vscale x 4 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f64.f64.iXLen(iXLen 1, iXLen 31, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f64.f64.iXLen(iXLen, iXLen, <vscale x 4 x double>, double, iXLen)
+
+define void @test_f_sf_vc_fv_se_e64m8(<vscale x 8 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f64.f64.iXLen(iXLen 1, iXLen 31, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f64.f64.iXLen(iXLen, iXLen, <vscale x 8 x double>, double, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_fv_se_e16mf4(<vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.fv.se.nxv1f16.iXLen.f16.iXLen(iXLen 1, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.fv.se.nxv1f16.iXLen.f16.iXLen(iXLen, <vscale x 1 x half>, half, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_fv_se_e16mf2(<vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.fv.se.nxv2f16.iXLen.f16.iXLen(iXLen 1, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.fv.se.nxv2f16.iXLen.f16.iXLen(iXLen, <vscale x 2 x half>, half, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_fv_se_e16m1(<vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fv.se.nxv4f16.iXLen.f16.iXLen(iXLen 1, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.fv.se.nxv4f16.iXLen.f16.iXLen(iXLen, <vscale x 4 x half>, half, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_fv_se_e16m2(<vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.fv.se.nxv8f16.iXLen.f16.iXLen(iXLen 1, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.fv.se.nxv8f16.iXLen.f16.iXLen(iXLen, <vscale x 8 x half>, half, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_fv_se_e16m4(<vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.fv.se.nxv16f16.iXLen.f16.iXLen(iXLen 1, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.fv.se.nxv16f16.iXLen.f16.iXLen(iXLen, <vscale x 16 x half>, half, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_fv_se_e16m8(<vscale x 32 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.fv.se.nxv32f16.iXLen.f16.iXLen(iXLen 1, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.fv.se.nxv32f16.iXLen.f16.iXLen(iXLen, <vscale x 32 x half>, half, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_fv_se_e32mf2(<vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fv.se.nxv1f32.iXLen.f32.iXLen(iXLen 1, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.fv.se.nxv1f32.iXLen.f32.iXLen(iXLen, <vscale x 1 x float>, float, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_fv_se_e32m1(<vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.se.nxv2f32.iXLen.f32.iXLen(iXLen 1, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.se.nxv2f32.iXLen.f32.iXLen(iXLen, <vscale x 2 x float>, float, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_fv_se_e32m2(<vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.se.nxv4f32.iXLen.f32.iXLen(iXLen 1, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.se.nxv4f32.iXLen.f32.iXLen(iXLen, <vscale x 4 x float>, float, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_fv_se_e32m4(<vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fv.se.nxv8f32.iXLen.f32.iXLen(iXLen 1, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.fv.se.nxv8f32.iXLen.f32.iXLen(iXLen, <vscale x 8 x float>, float, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_fv_se_e32m8(<vscale x 16 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fv.se.nxv16f32.iXLen.f32.iXLen(iXLen 1, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.fv.se.nxv16f32.iXLen.f32.iXLen(iXLen, <vscale x 16 x float>, float, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_fv_se_e64m1(<vscale x 1 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fv.se.nxv1f64.iXLen.f64.iXLen(iXLen 1, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.fv.se.nxv1f64.iXLen.f64.iXLen(iXLen, <vscale x 1 x double>, double, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_fv_se_e64m2(<vscale x 2 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fv.se.nxv2f64.iXLen.f64.iXLen(iXLen 1, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.fv.se.nxv2f64.iXLen.f64.iXLen(iXLen, <vscale x 2 x double>, double, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_fv_se_e64m4(<vscale x 4 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fv.se.nxv4f64.iXLen.f64.iXLen(iXLen 1, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.fv.se.nxv4f64.iXLen.f64.iXLen(iXLen, <vscale x 4 x double>, double, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_fv_se_e64m8(<vscale x 8 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fv.se.nxv8f64.iXLen.f64.iXLen(iXLen 1, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.fv.se.nxv8f64.iXLen.f64.iXLen(iXLen, <vscale x 8 x double>, double, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_fv_e16mf4(<vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.fv.nxv1f16.iXLen.f16.iXLen(iXLen 1, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.fv.nxv1f16.iXLen.f16.iXLen(iXLen, <vscale x 1 x half>, half, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_fv_e16mf2(<vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.fv.nxv2f16.iXLen.f16.iXLen(iXLen 1, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.fv.nxv2f16.iXLen.f16.iXLen(iXLen, <vscale x 2 x half>, half, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_fv_e16m1(<vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fv.nxv4f16.iXLen.f16.iXLen(iXLen 1, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.fv.nxv4f16.iXLen.f16.iXLen(iXLen, <vscale x 4 x half>, half, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_fv_e16m2(<vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.fv.nxv8f16.iXLen.f16.iXLen(iXLen 1, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.fv.nxv8f16.iXLen.f16.iXLen(iXLen, <vscale x 8 x half>, half, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_fv_e16m4(<vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.fv.nxv16f16.iXLen.f16.iXLen(iXLen 1, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.fv.nxv16f16.iXLen.f16.iXLen(iXLen, <vscale x 16 x half>, half, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_fv_e16m8(<vscale x 32 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.fv.nxv32f16.iXLen.f16.iXLen(iXLen 1, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.fv.nxv32f16.iXLen.f16.iXLen(iXLen, <vscale x 32 x half>, half, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_fv_e32mf2(<vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fv.nxv1f32.iXLen.f32.iXLen(iXLen 1, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.fv.nxv1f32.iXLen.f32.iXLen(iXLen, <vscale x 1 x float>, float, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_fv_e32m1(<vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.nxv2f32.iXLen.f32.iXLen(iXLen 1, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.nxv2f32.iXLen.f32.iXLen(iXLen, <vscale x 2 x float>, float, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_fv_e32m2(<vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.nxv4f32.iXLen.f32.iXLen(iXLen 1, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.nxv4f32.iXLen.f32.iXLen(iXLen, <vscale x 4 x float>, float, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_fv_e32m4(<vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fv.nxv8f32.iXLen.f32.iXLen(iXLen 1, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.fv.nxv8f32.iXLen.f32.iXLen(iXLen, <vscale x 8 x float>, float, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_fv_e32m8(<vscale x 16 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fv.nxv16f32.iXLen.f32.iXLen(iXLen 1, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.fv.nxv16f32.iXLen.f32.iXLen(iXLen, <vscale x 16 x float>, float, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_fv_e64m1(<vscale x 1 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fv.nxv1f64.iXLen.f64.iXLen(iXLen 1, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.fv.nxv1f64.iXLen.f64.iXLen(iXLen, <vscale x 1 x double>, double, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_fv_e64m2(<vscale x 2 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fv.nxv2f64.iXLen.f64.iXLen(iXLen 1, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.fv.nxv2f64.iXLen.f64.iXLen(iXLen, <vscale x 2 x double>, double, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_fv_e64m4(<vscale x 4 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fv.nxv4f64.iXLen.f64.iXLen(iXLen 1, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.fv.nxv4f64.iXLen.f64.iXLen(iXLen, <vscale x 4 x double>, double, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_fv_e64m8(<vscale x 8 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fv_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fv.nxv8f64.iXLen.f64.iXLen(iXLen 1, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.fv.nxv8f64.iXLen.f64.iXLen(iXLen, <vscale x 8 x double>, double, iXLen)

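(Editorial sketch, not part of the patch: the tests above use the iXLen placeholder that each file's RUN lines expand with sed. A minimal standalone module, assuming a hypothetical file name demo.ll and the riscv64 configuration where iXLen becomes i64, shows the concrete shape of one of the new floating-point .fv tests. Compiled with the same flags as the RUN lines,

  llc -mtriple=riscv64 -mattr=+v,+zvfh,+xsfvcp -verify-machineinstrs demo.ll

the call is expected to select to "sf.vc.v.fv 1, v8, v8, fa0" exactly as in the e32m1 CHECK lines above:

  declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.se.nxv2f32.i64.f32.i64(i64, <vscale x 2 x float>, float, i64)

  define <vscale x 2 x float> @demo(<vscale x 2 x float> %vs2, float %fs1, i64 %vl) {
  entry:
    ; VCIX opcode 1; vs2 arrives in v8, the float scalar in fa0, vl in a0
    %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.se.nxv2f32.i64.f32.i64(i64 1, <vscale x 2 x float> %vs2, float %fs1, i64 %vl)
    ret <vscale x 2 x float> %0
  }
)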
diff  --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll
index d37d121cdd19737..ef2eee9e438e99d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN:  sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+xsfvcp \
+; RUN:  sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+xsfvcp \
 ; RUN:    -verify-machineinstrs | FileCheck %s
-; RUN:  sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+xsfvcp \
+; RUN:  sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+xsfvcp \
 ; RUN:    -verify-machineinstrs | FileCheck %s
 
 define void @test_sf_vc_vvv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
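(Editorial note: the RUN lines above switch from +zfh to +zvfh because the new tests operate on half-precision vector elements, which need the Zvfh vector FP16 extension rather than only scalar Zfh. As with the .fv sketch earlier, here is an illustrative, non-patch module, again assuming iXLen has been expanded to i64 for riscv64, for one of the three-vector-operand .vvv tests added below; per the matching CHECK lines it is expected to select to "sf.vc.vvv 3, v8, v9, v10":

  declare void @llvm.riscv.sf.vc.vvv.se.i64.nxv2f32.nxv2f32.i64(i64, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64)

  define void @demo_vvv(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, i64 %vl) {
  entry:
    ; VCIX opcode 3; vd, vs2, vs1 are all passed through to the custom
    ; instruction, and this side-effect-only form returns no value
    tail call void @llvm.riscv.sf.vc.vvv.se.i64.nxv2f32.nxv2f32.i64(i64 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, i64 %vl)
    ret void
  }
)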
@@ -3018,3 +3018,2196 @@ entry:
 }
 
 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.nxv8i64.iXLen.f64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, double, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f16.nxv1f16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f16.nxv1f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f16.nxv2f16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f16.nxv2f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f16.nxv4f16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f16.nxv4f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f16.nxv8f16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f16.nxv8f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f16.nxv16f16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f16.nxv16f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32f16.nxv32f16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32f16.nxv32f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x half>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f32.nxv1f32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f32.nxv1f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f32.nxv2f32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f32.nxv2f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f32.nxv4f32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f32.nxv4f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f32.nxv8f32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f32.nxv8f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f32.nxv16f32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f32.nxv16f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f64.nxv1f64.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f64.nxv1f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f64.nxv2f64.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f64.nxv2f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f64.nxv4f64.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f64.nxv4f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x double>, iXLen)
+
+define void @test_f_sf_vc_vvv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f64.nxv8f64.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f64.nxv8f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x double>, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_vvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv1f16.iXLen.nxv1f16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv1f16.iXLen.nxv1f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_vvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv2f16.iXLen.nxv2f16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv2f16.iXLen.nxv2f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_vvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv4f16.iXLen.nxv4f16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv4f16.iXLen.nxv4f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_vvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv8f16.iXLen.nxv8f16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv8f16.iXLen.nxv8f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_vvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv16f16.iXLen.nxv16f16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv16f16.iXLen.nxv16f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_vvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv32f16.iXLen.nxv32f16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv32f16.iXLen.nxv32f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x half>, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_vvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv1f32.iXLen.nxv1f32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv1f32.iXLen.nxv1f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_vvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv2f32.iXLen.nxv2f32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv2f32.iXLen.nxv2f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_vvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv4f32.iXLen.nxv4f32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv4f32.iXLen.nxv4f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_vvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv8f32.iXLen.nxv8f32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv8f32.iXLen.nxv8f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_vvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv16f32.iXLen.nxv16f32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv16f32.iXLen.nxv16f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_vvv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv1f64.iXLen.nxv1f64.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv1f64.iXLen.nxv1f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_vvv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv2f64.iXLen.nxv2f64.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv2f64.iXLen.nxv2f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_vvv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv4f64.iXLen.nxv4f64.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv4f64.iXLen.nxv4f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x double>, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_vvv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv8f64.iXLen.nxv8f64.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv8f64.iXLen.nxv8f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x double>, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_vvv_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.nxv1f16.iXLen.nxv1f16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.nxv1f16.iXLen.nxv1f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_vvv_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.nxv2f16.iXLen.nxv2f16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.nxv2f16.iXLen.nxv2f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_vvv_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.nxv4f16.iXLen.nxv4f16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.nxv4f16.iXLen.nxv4f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_vvv_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.nxv8f16.iXLen.nxv8f16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.nxv8f16.iXLen.nxv8f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_vvv_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.nxv16f16.iXLen.nxv16f16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.nxv16f16.iXLen.nxv16f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_vvv_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.nxv32f16.iXLen.nxv32f16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.nxv32f16.iXLen.nxv32f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x half>, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_vvv_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.nxv1f32.iXLen.nxv1f32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.nxv1f32.iXLen.nxv1f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_vvv_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.nxv2f32.iXLen.nxv2f32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.nxv2f32.iXLen.nxv2f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_vvv_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.nxv4f32.iXLen.nxv4f32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.nxv4f32.iXLen.nxv4f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_vvv_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.nxv8f32.iXLen.nxv8f32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.nxv8f32.iXLen.nxv8f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_vvv_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.nxv16f32.iXLen.nxv16f32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.nxv16f32.iXLen.nxv16f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_vvv_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.nxv1f64.iXLen.nxv1f64.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.nxv1f64.iXLen.nxv1f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_vvv_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.nxv2f64.iXLen.nxv2f64.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.nxv2f64.iXLen.nxv2f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_vvv_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.nxv4f64.iXLen.nxv4f64.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.nxv4f64.iXLen.nxv4f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x double>, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_vvv_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvv_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvv.nxv8f64.iXLen.nxv8f64.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvv.nxv8f64.iXLen.nxv8f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x double>, iXLen)
+
+define void @test_f_sf_vc_xvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f16.f16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f16.f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f16.f16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f16.f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f16.f16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f16.f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f16.f16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f16.f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f16.f16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f16.f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32f16.f16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32f16.f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.f32.nxv1f32.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.f32.nxv1f32.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_xvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.f32.nxv2f32.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.f32.nxv2f32.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_xvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.f32.nxv4f32.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.f32.nxv4f32.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_xvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.f32.nxv8f32.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.f32.nxv8f32.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_xvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.f32.nxv16f32.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.f32.nxv16f32.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, i32, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_xvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv1f16.iXLen.f16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv1f16.iXLen.f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, i16, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_xvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv2f16.iXLen.f16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv2f16.iXLen.f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, i16, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_xvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv4f16.iXLen.f16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv4f16.iXLen.f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, i16, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_xvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv8f16.iXLen.f16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv8f16.iXLen.f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, i16, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_xvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv16f16.iXLen.f16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv16f16.iXLen.f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, i16, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_xvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv32f16.iXLen.f16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv32f16.iXLen.f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, i16, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_xvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv1f32.iXLen.f32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv1f32.iXLen.f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, i32, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_xvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv2f32.iXLen.f32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv2f32.iXLen.f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, i32, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_xvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.iXLen.f32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.iXLen.f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, i32, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_xvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv8f32.iXLen.f32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv8f32.iXLen.f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, i32, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_xvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv16f32.iXLen.f32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv16f32.iXLen.f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, i32, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_xvv_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.xvv.nxv1f16.iXLen.f16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.xvv.nxv1f16.iXLen.f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, i16, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_xvv_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.xvv.nxv2f16.iXLen.f16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.xvv.nxv2f16.iXLen.f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, i16, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_xvv_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.xvv.nxv4f16.iXLen.f16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.xvv.nxv4f16.iXLen.f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, i16, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_xvv_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.xvv.nxv8f16.iXLen.f16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.xvv.nxv8f16.iXLen.f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, i16, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_xvv_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.xvv.nxv16f16.iXLen.f16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.xvv.nxv16f16.iXLen.f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, i16, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_xvv_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.xvv.nxv32f16.iXLen.f16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.xvv.nxv32f16.iXLen.f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, i16, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_xvv_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvv.nxv1f32.iXLen.f32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvv.nxv1f32.iXLen.f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, i32, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_xvv_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvv.nxv2f32.iXLen.f32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvv.nxv2f32.iXLen.f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, i32, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_xvv_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.nxv4f32.iXLen.f32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.nxv4f32.iXLen.f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, i32, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_xvv_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvv.nxv8f32.iXLen.f32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvv.nxv8f32.iXLen.f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, i32, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_xvv_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvv_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvv.nxv16f32.iXLen.f32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvv.nxv16f32.iXLen.f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f16.iXLen.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f16.iXLen.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f16.iXLen.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f16.iXLen.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f16.iXLen.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f16.iXLen.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f16.iXLen.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f16.iXLen.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f16.iXLen.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f16.iXLen.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32f16.iXLen.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32f16.iXLen.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f32.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f32.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f32.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f32.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f32.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f32.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f32.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f32.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f32.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f32.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f64.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f64.iXLen.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f64.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f64.iXLen.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f64.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f64.iXLen.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f64.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f64.iXLen.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, iXLen, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_ivv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv1f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv1f16.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, iXLen, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_ivv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv2f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv2f16.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, iXLen, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_ivv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv4f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv4f16.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, iXLen, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_ivv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv8f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv8f16.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, iXLen, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_ivv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv16f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv16f16.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, iXLen, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_ivv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv32f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv32f16.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, iXLen, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_ivv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv1f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv1f32.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, iXLen, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_ivv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv2f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv2f32.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, iXLen, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_ivv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv4f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv4f32.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, iXLen, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_ivv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv8f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv8f32.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, iXLen, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_ivv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv16f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv16f32.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, iXLen, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_ivv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv1f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv1f64.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, iXLen, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_ivv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv2f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv2f64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, iXLen, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_ivv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv4f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv4f64.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, iXLen, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_ivv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv8f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv8f64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, iXLen, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_ivv_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.ivv.nxv1f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.ivv.nxv1f16.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, iXLen, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_ivv_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.ivv.nxv2f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.ivv.nxv2f16.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, iXLen, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_ivv_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.ivv.nxv4f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.ivv.nxv4f16.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, iXLen, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_ivv_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.ivv.nxv8f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.ivv.nxv8f16.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, iXLen, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_ivv_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.ivv.nxv16f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.ivv.nxv16f16.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, iXLen, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_ivv_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.ivv.nxv32f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.ivv.nxv32f16.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, iXLen, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_ivv_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivv.nxv1f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivv.nxv1f32.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, iXLen, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_ivv_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivv.nxv2f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivv.nxv2f32.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, iXLen, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_ivv_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivv.nxv4f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivv.nxv4f32.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, iXLen, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_ivv_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivv.nxv8f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivv.nxv8f32.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, iXLen, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_ivv_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivv.nxv16f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivv.nxv16f32.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, iXLen, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_ivv_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivv.nxv1f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivv.nxv1f64.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, iXLen, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_ivv_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivv.nxv2f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivv.nxv2f64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, iXLen, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_ivv_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivv.nxv4f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivv.nxv4f64.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, iXLen, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_ivv_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivv_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivv.nxv8f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivv.nxv8f64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, iXLen, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f16.f16.iXLen(iXLen 1, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f16.f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f16.f16.iXLen(iXLen 1, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f16.f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f16.f16.iXLen(iXLen 1, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f16.f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f16.f16.iXLen(iXLen 1, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f16.f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f16.f16.iXLen(iXLen 1, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f16.f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32f16.f16.iXLen(iXLen 1, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32f16.f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f32.f32.iXLen(iXLen 1, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f32.f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f32.f32.iXLen(iXLen 1, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f32.f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f32.f32.iXLen(iXLen 1, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f32.f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f32.f32.iXLen(iXLen 1, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f32.f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f32.f32.iXLen(iXLen 1, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f32.f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f64.f64.iXLen(iXLen 1, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f64.f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, double, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f64.f64.iXLen(iXLen 1, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f64.f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, double, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f64.f64.iXLen(iXLen 1, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f64.f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, double, iXLen)
+
+define void @test_f_sf_vc_fvv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f64.f64.iXLen(iXLen 1, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f64.f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, double, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_fvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv1f16.iXLen.f16.iXLen(iXLen 1, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv1f16.iXLen.f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, half, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_fvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv2f16.iXLen.f16.iXLen(iXLen 1, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv2f16.iXLen.f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, half, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_fvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv4f16.iXLen.f16.iXLen(iXLen 1, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv4f16.iXLen.f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, half, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_fvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv8f16.iXLen.f16.iXLen(iXLen 1, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv8f16.iXLen.f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, half, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_fvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv16f16.iXLen.f16.iXLen(iXLen 1, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv16f16.iXLen.f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, half, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_fvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv32f16.iXLen.f16.iXLen(iXLen 1, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv32f16.iXLen.f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, half, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_fvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv1f32.iXLen.f32.iXLen(iXLen 1, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv1f32.iXLen.f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, float, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_fvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv2f32.iXLen.f32.iXLen(iXLen 1, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv2f32.iXLen.f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, float, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_fvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv4f32.iXLen.f32.iXLen(iXLen 1, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv4f32.iXLen.f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, float, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_fvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv8f32.iXLen.f32.iXLen(iXLen 1, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv8f32.iXLen.f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, float, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_fvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv16f32.iXLen.f32.iXLen(iXLen 1, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv16f32.iXLen.f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, float, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_fvv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv1f64.iXLen.f64.iXLen(iXLen 1, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv1f64.iXLen.f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, double, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_fvv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv2f64.iXLen.f64.iXLen(iXLen 1, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv2f64.iXLen.f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, double, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_fvv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv4f64.iXLen.f64.iXLen(iXLen 1, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv4f64.iXLen.f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, double, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_fvv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv8f64.iXLen.f64.iXLen(iXLen 1, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv8f64.iXLen.f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, double, iXLen)
+
+define <vscale x 1 x half> @test_f_sf_vc_v_fvv_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.fvv.nxv1f16.iXLen.f16.iXLen(iXLen 1, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.fvv.nxv1f16.iXLen.f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, half, iXLen)
+
+define <vscale x 2 x half> @test_f_sf_vc_v_fvv_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.fvv.nxv2f16.iXLen.f16.iXLen(iXLen 1, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.fvv.nxv2f16.iXLen.f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, half, iXLen)
+
+define <vscale x 4 x half> @test_f_sf_vc_v_fvv_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fvv.nxv4f16.iXLen.f16.iXLen(iXLen 1, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.fvv.nxv4f16.iXLen.f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, half, iXLen)
+
+define <vscale x 8 x half> @test_f_sf_vc_v_fvv_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.fvv.nxv8f16.iXLen.f16.iXLen(iXLen 1, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.fvv.nxv8f16.iXLen.f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, half, iXLen)
+
+define <vscale x 16 x half> @test_f_sf_vc_v_fvv_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.fvv.nxv16f16.iXLen.f16.iXLen(iXLen 1, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.fvv.nxv16f16.iXLen.f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, half, iXLen)
+
+define <vscale x 32 x half> @test_f_sf_vc_v_fvv_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.fvv.nxv32f16.iXLen.f16.iXLen(iXLen 1, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.fvv.nxv32f16.iXLen.f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, half, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_fvv_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvv.nxv1f32.iXLen.f32.iXLen(iXLen 1, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvv.nxv1f32.iXLen.f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, float, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_fvv_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvv.nxv2f32.iXLen.f32.iXLen(iXLen 1, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvv.nxv2f32.iXLen.f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, float, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_fvv_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvv.nxv4f32.iXLen.f32.iXLen(iXLen 1, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvv.nxv4f32.iXLen.f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, float, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_fvv_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvv.nxv8f32.iXLen.f32.iXLen(iXLen 1, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvv.nxv8f32.iXLen.f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, float, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_fvv_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvv.nxv16f32.iXLen.f32.iXLen(iXLen 1, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvv.nxv16f32.iXLen.f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, float, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_fvv_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvv.nxv1f64.iXLen.f64.iXLen(iXLen 1, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvv.nxv1f64.iXLen.f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, double, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_fvv_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvv.nxv2f64.iXLen.f64.iXLen(iXLen 1, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvv.nxv2f64.iXLen.f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, double, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_fvv_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvv.nxv4f64.iXLen.f64.iXLen(iXLen 1, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvv.nxv4f64.iXLen.f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, double, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_fvv_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvv_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvv.nxv8f64.iXLen.f64.iXLen(iXLen 1, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvv.nxv8f64.iXLen.f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, double, iXLen)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll
index 2d6ac8d55fc152e..35754aa02268638 100644
--- a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN:  sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+xsfvcp \
+; RUN:  sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+xsfvcp \
 ; RUN:    -verify-machineinstrs | FileCheck %s
-; RUN:  sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+xsfvcp \
+; RUN:  sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+xsfvcp \
 ; RUN:    -verify-machineinstrs | FileCheck %s
 
 define void @test_sf_vc_vvw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
@@ -2109,3 +2109,1407 @@ entry:
 }
 
 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, float, iXLen)
+
+define void @test_f_sf_vc_vvw_se_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvw_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f32.nxv1f16.nxv1f16.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f32.nxv1f16.nxv1f16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
+
+define void @test_f_sf_vc_vvw_se_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvw_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f32.nxv2f16.nxv2f16.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f32.nxv2f16.nxv2f16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
+
+define void @test_f_sf_vc_vvw_se_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvw_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v10, v11
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f32.nxv4f16.nxv4f16.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f32.nxv4f16.nxv4f16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
+
+define void @test_f_sf_vc_vvw_se_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvw_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v12, v14
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f32.nxv8f16.nxv8f16.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f32.nxv8f16.nxv8f16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
+
+define void @test_f_sf_vc_vvw_se_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvw_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v16, v20
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16f32.nxv16f16.nxv16f16.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16f32.nxv16f16.nxv16f16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
+
+define void @test_f_sf_vc_vvw_se_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvw_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f64.nxv1f32.nxv1f32.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f64.nxv1f32.nxv1f32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
+
+define void @test_f_sf_vc_vvw_se_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvw_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v10, v11
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f64.nxv2f32.nxv2f32.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f64.nxv2f32.nxv2f32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
+
+define void @test_f_sf_vc_vvw_se_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvw_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v12, v14
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f64.nxv4f32.nxv4f32.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f64.nxv4f32.nxv4f32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
+
+define void @test_f_sf_vc_vvw_se_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_vvw_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v16, v20
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f64.nxv8f32.nxv8f32.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f64.nxv8f32.nxv8f32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_vvw_se_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv1f32.iXLen.nxv1f16.nxv1f16.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv1f32.iXLen.nxv1f16.nxv1f16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_vvw_se_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv2f32.iXLen.nxv2f16.nxv2f16.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv2f32.iXLen.nxv2f16.nxv2f16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_vvw_se_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv4f32.iXLen.nxv4f16.nxv4f16.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv4f32.iXLen.nxv4f16.nxv4f16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_vvw_se_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv8f32.iXLen.nxv8f16.nxv8f16.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv8f32.iXLen.nxv8f16.nxv8f16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_vvw_se_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv16f32.iXLen.nxv16f16.nxv16f16.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv16f32.iXLen.nxv16f16.nxv16f16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_vvw_se_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv1f64.iXLen.nxv1f32.nxv1f32.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv1f64.iXLen.nxv1f32.nxv1f32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_vvw_se_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv2f64.iXLen.nxv2f32.nxv2f32.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv2f64.iXLen.nxv2f32.nxv2f32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_vvw_se_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv4f64.iXLen.nxv4f32.nxv4f32.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv4f64.iXLen.nxv4f32.nxv4f32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_vvw_se_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv8f64.iXLen.nxv8f32.nxv8f32.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv8f64.iXLen.nxv8f32.nxv8f32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_vvw_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvw.nxv1f32.iXLen.nxv1f16.nxv1f16.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvw.nxv1f32.iXLen.nxv1f16.nxv1f16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_vvw_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvw.nxv2f32.iXLen.nxv2f16.nxv2f16.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvw.nxv2f32.iXLen.nxv2f16.nxv2f16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_vvw_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvw.nxv4f32.iXLen.nxv4f16.nxv4f16.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvw.nxv4f32.iXLen.nxv4f16.nxv4f16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_vvw_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvw.nxv8f32.iXLen.nxv8f16.nxv8f16.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvw.nxv8f32.iXLen.nxv8f16.nxv8f16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_vvw_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvw.nxv16f32.iXLen.nxv16f16.nxv16f16.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvw.nxv16f32.iXLen.nxv16f16.nxv16f16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_vvw_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvw.nxv1f64.iXLen.nxv1f32.nxv1f32.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvw.nxv1f64.iXLen.nxv1f32.nxv1f32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_vvw_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvw.nxv2f64.iXLen.nxv2f32.nxv2f32.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvw.nxv2f64.iXLen.nxv2f32.nxv2f32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_vvw_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvw.nxv4f64.iXLen.nxv4f32.nxv4f32.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvw.nxv4f64.iXLen.nxv4f32.nxv4f32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_vvw_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_vvw_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvw.nxv8f64.iXLen.nxv8f32.nxv8f32.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvw.nxv8f64.iXLen.nxv8f32.nxv8f32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
+
+define void @test_f_sf_vc_xvw_se_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvw_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f32.nxv1f16.i16.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f32.nxv1f16.i16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xvw_se_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvw_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f32.nxv2f16.i16.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f32.nxv2f16.i16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xvw_se_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvw_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f32.nxv4f16.i16.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f32.nxv4f16.i16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xvw_se_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvw_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f32.nxv8f16.i16.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f32.nxv8f16.i16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xvw_se_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvw_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16f32.nxv16f16.i16.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16f32.nxv16f16.i16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, i16, iXLen)
+
+define void @test_f_sf_vc_xvw_se_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvw_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f64.nxv1f32.i32.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f64.nxv1f32.i32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_xvw_se_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvw_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f64.nxv2f32.i32.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f64.nxv2f32.i32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_xvw_se_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvw_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f64.nxv4f32.i32.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f64.nxv4f32.i32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_xvw_se_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_xvw_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f64.nxv8f32.i32.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f64.nxv8f32.i32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, i32, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_xvw_se_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv1f32.iXLen.nxv1f16.i16.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv1f32.iXLen.nxv1f16.i16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, i16, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_xvw_se_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv2f32.iXLen.nxv2f16.i16.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv2f32.iXLen.nxv2f16.i16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, i16, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_xvw_se_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv4f32.iXLen.nxv4f16.i16.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv4f32.iXLen.nxv4f16.i16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, i16, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_xvw_se_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv8f32.iXLen.nxv8f16.i16.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv8f32.iXLen.nxv8f16.i16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, i16, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_xvw_se_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv16f32.iXLen.nxv16f16.i16.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv16f32.iXLen.nxv16f16.i16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, i16, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_xvw_se_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv1f64.iXLen.nxv1f32.i32.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv1f64.iXLen.nxv1f32.i32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, i32, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_xvw_se_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv2f64.iXLen.nxv2f32.i32.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv2f64.iXLen.nxv2f32.i32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, i32, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_xvw_se_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.iXLen.nxv4f32.i32.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.iXLen.nxv4f32.i32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, i32, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_xvw_se_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv8f64.iXLen.nxv8f32.i32.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv8f64.iXLen.nxv8f32.i32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, i32, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_xvw_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvw.nxv1f32.iXLen.nxv1f16.i16.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvw.nxv1f32.iXLen.nxv1f16.i16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, i16, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_xvw_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvw.nxv2f32.iXLen.nxv2f16.i16.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvw.nxv2f32.iXLen.nxv2f16.i16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, i16, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_xvw_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvw.nxv4f32.iXLen.nxv4f16.i16.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvw.nxv4f32.iXLen.nxv4f16.i16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, i16, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_xvw_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvw.nxv8f32.iXLen.nxv8f16.i16.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvw.nxv8f32.iXLen.nxv8f16.i16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, i16, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_xvw_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvw.nxv16f32.iXLen.nxv16f16.i16.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvw.nxv16f32.iXLen.nxv16f16.i16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, i16, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_xvw_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.xvw.nxv1f64.iXLen.nxv1f32.i32.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.xvw.nxv1f64.iXLen.nxv1f32.i32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, i32, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_xvw_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.xvw.nxv2f64.iXLen.nxv2f32.i32.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.xvw.nxv2f64.iXLen.nxv2f32.i32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, i32, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_xvw_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.xvw.nxv4f64.iXLen.nxv4f32.i32.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.xvw.nxv4f64.iXLen.nxv4f32.i32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, i32, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_xvw_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_xvw_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.xvw.nxv8f64.iXLen.nxv8f32.i32.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.xvw.nxv8f64.iXLen.nxv8f32.i32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, i32, iXLen)
+
+define void @test_f_sf_vc_ivw_se_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivw_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f32.nxv1f16.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f32.nxv1f16.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivw_se_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivw_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f32.nxv2f16.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f32.nxv2f16.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivw_se_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivw_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f32.nxv4f16.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f32.nxv4f16.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivw_se_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivw_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f32.nxv8f16.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f32.nxv8f16.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivw_se_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivw_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16f32.nxv16f16.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16f32.nxv16f16.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivw_se_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivw_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f64.nxv1f32.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f64.nxv1f32.iXLen.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivw_se_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivw_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f64.nxv2f32.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f64.nxv2f32.iXLen.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivw_se_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivw_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f64.nxv4f32.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f64.nxv4f32.iXLen.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_ivw_se_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_ivw_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f64.nxv8f32.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f64.nxv8f32.iXLen.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, iXLen, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_ivw_se_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv1f32.iXLen.nxv1f16.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv1f32.iXLen.nxv1f16.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, iXLen, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_ivw_se_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv2f32.iXLen.nxv2f16.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv2f32.iXLen.nxv2f16.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, iXLen, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_ivw_se_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv4f32.iXLen.nxv4f16.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv4f32.iXLen.nxv4f16.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, iXLen, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_ivw_se_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv8f32.iXLen.nxv8f16.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv8f32.iXLen.nxv8f16.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, iXLen, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_ivw_se_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv16f32.iXLen.nxv16f16.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv16f32.iXLen.nxv16f16.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, iXLen, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_ivw_se_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv1f64.iXLen.nxv1f32.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv1f64.iXLen.nxv1f32.iXLen.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, iXLen, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_ivw_se_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv2f64.iXLen.nxv2f32.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv2f64.iXLen.nxv2f32.iXLen.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, iXLen, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_ivw_se_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv4f64.iXLen.nxv4f32.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv4f64.iXLen.nxv4f32.iXLen.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, iXLen, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_ivw_se_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv8f64.iXLen.nxv8f32.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv8f64.iXLen.nxv8f32.iXLen.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, iXLen, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_ivw_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivw.nxv1f32.iXLen.nxv1f16.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivw.nxv1f32.iXLen.nxv1f16.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, iXLen, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_ivw_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivw.nxv2f32.iXLen.nxv2f16.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivw.nxv2f32.iXLen.nxv2f16.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, iXLen, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_ivw_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivw.nxv4f32.iXLen.nxv4f16.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivw.nxv4f32.iXLen.nxv4f16.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, iXLen, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_ivw_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivw.nxv8f32.iXLen.nxv8f16.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivw.nxv8f32.iXLen.nxv8f16.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, iXLen, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_ivw_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivw.nxv16f32.iXLen.nxv16f16.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivw.nxv16f32.iXLen.nxv16f16.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, iXLen, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_ivw_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivw.nxv1f64.iXLen.nxv1f32.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivw.nxv1f64.iXLen.nxv1f32.iXLen.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, iXLen, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_ivw_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivw.nxv2f64.iXLen.nxv2f32.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivw.nxv2f64.iXLen.nxv2f32.iXLen.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, iXLen, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_ivw_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivw.nxv4f64.iXLen.nxv4f32.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivw.nxv4f64.iXLen.nxv4f32.iXLen.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, iXLen, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_ivw_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_ivw_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivw.nxv8f64.iXLen.nxv8f32.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivw.nxv8f64.iXLen.nxv8f32.iXLen.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, iXLen, iXLen)
+
+define void @test_f_sf_vc_fvw_se_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvw_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f32.nxv1f16.f16.iXLen(iXLen 1, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f32.nxv1f16.f16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fvw_se_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvw_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f32.nxv2f16.f16.iXLen(iXLen 1, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f32.nxv2f16.f16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fvw_se_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvw_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f32.nxv4f16.f16.iXLen(iXLen 1, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f32.nxv4f16.f16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fvw_se_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvw_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f32.nxv8f16.f16.iXLen(iXLen 1, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f32.nxv8f16.f16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fvw_se_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvw_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16f32.nxv16f16.f16.iXLen(iXLen 1, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16f32.nxv16f16.f16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, half, iXLen)
+
+define void @test_f_sf_vc_fvw_se_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvw_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f64.nxv1f32.f32.iXLen(iXLen 1, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f64.nxv1f32.f32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fvw_se_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvw_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f64.nxv2f32.f32.iXLen(iXLen 1, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f64.nxv2f32.f32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fvw_se_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvw_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f64.nxv4f32.f32.iXLen(iXLen 1, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f64.nxv4f32.f32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, float, iXLen)
+
+define void @test_f_sf_vc_fvw_se_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_fvw_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f64.nxv8f32.f32.iXLen(iXLen 1, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f64.nxv8f32.f32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, float, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_fvw_se_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv1f32.iXLen.nxv1f16.f16.iXLen(iXLen 1, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv1f32.iXLen.nxv1f16.f16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, half, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_fvw_se_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv2f32.iXLen.nxv2f16.f16.iXLen(iXLen 1, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv2f32.iXLen.nxv2f16.f16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, half, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_fvw_se_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv4f32.iXLen.nxv4f16.f16.iXLen(iXLen 1, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv4f32.iXLen.nxv4f16.f16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, half, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_fvw_se_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv8f32.iXLen.nxv8f16.f16.iXLen(iXLen 1, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv8f32.iXLen.nxv8f16.f16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, half, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_fvw_se_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv16f32.iXLen.nxv16f16.f16.iXLen(iXLen 1, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv16f32.iXLen.nxv16f16.f16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, half, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_fvw_se_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv1f64.iXLen.nxv1f32.f32.iXLen(iXLen 1, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv1f64.iXLen.nxv1f32.f32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, float, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_fvw_se_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv2f64.iXLen.nxv2f32.f32.iXLen(iXLen 1, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv2f64.iXLen.nxv2f32.f32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, float, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_fvw_se_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv4f64.iXLen.nxv4f32.f32.iXLen(iXLen 1, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv4f64.iXLen.nxv4f32.f32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, float, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_fvw_se_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv8f64.iXLen.nxv8f32.f32.iXLen(iXLen 1, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv8f64.iXLen.nxv8f32.f32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, float, iXLen)
+
+define <vscale x 1 x float> @test_f_sf_vc_v_fvw_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvw.nxv1f32.iXLen.nxv1f16.f16.iXLen(iXLen 1, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvw.nxv1f32.iXLen.nxv1f16.f16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, half, iXLen)
+
+define <vscale x 2 x float> @test_f_sf_vc_v_fvw_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvw.nxv2f32.iXLen.nxv2f16.f16.iXLen(iXLen 1, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvw.nxv2f32.iXLen.nxv2f16.f16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, half, iXLen)
+
+define <vscale x 4 x float> @test_f_sf_vc_v_fvw_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvw.nxv4f32.iXLen.nxv4f16.f16.iXLen(iXLen 1, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvw.nxv4f32.iXLen.nxv4f16.f16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, half, iXLen)
+
+define <vscale x 8 x float> @test_f_sf_vc_v_fvw_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvw.nxv8f32.iXLen.nxv8f16.f16.iXLen(iXLen 1, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvw.nxv8f32.iXLen.nxv8f16.f16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, half, iXLen)
+
+define <vscale x 16 x float> @test_f_sf_vc_v_fvw_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvw.nxv16f32.iXLen.nxv16f16.f16.iXLen(iXLen 1, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvw.nxv16f32.iXLen.nxv16f16.f16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, half, iXLen)
+
+define <vscale x 1 x double> @test_f_sf_vc_v_fvw_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvw.nxv1f64.iXLen.nxv1f32.f32.iXLen(iXLen 1, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 1 x double> %0
+}
+
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvw.nxv1f64.iXLen.nxv1f32.f32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, float, iXLen)
+
+define <vscale x 2 x double> @test_f_sf_vc_v_fvw_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvw.nxv2f64.iXLen.nxv2f32.f32.iXLen(iXLen 1, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 2 x double> %0
+}
+
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvw.nxv2f64.iXLen.nxv2f32.f32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, float, iXLen)
+
+define <vscale x 4 x double> @test_f_sf_vc_v_fvw_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvw.nxv4f64.iXLen.nxv4f32.f32.iXLen(iXLen 1, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 4 x double> %0
+}
+
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvw.nxv4f64.iXLen.nxv4f32.f32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, float, iXLen)
+
+define <vscale x 8 x double> @test_f_sf_vc_v_fvw_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_f_sf_vc_v_fvw_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvw.nxv8f64.iXLen.nxv8f32.f32.iXLen(iXLen 1, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvw.nxv8f64.iXLen.nxv8f32.f32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, float, iXLen)