[llvm-branch-commits] [llvm] 229482f - [SiFive] Support LLVM IR intrinsics for xsfvcp extension.

via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Mon Nov 14 23:54:05 PST 2022


Author: Nelson Chu
Date: 2022-11-14T23:51:19-08:00
New Revision: 229482fe29e7adae93da36fb708c25075fbd9be6

URL: https://github.com/llvm/llvm-project/commit/229482fe29e7adae93da36fb708c25075fbd9be6
DIFF: https://github.com/llvm/llvm-project/commit/229482fe29e7adae93da36fb708c25075fbd9be6.diff

LOG: [SiFive] Support LLVM IR intrinsics for xsfvcp extension.

The sf.vc.x and sf.vc.i intrinsics don't have any vector types in their
outputs or inputs, but the intrinsic spec says we still need to emit a
vsetvli for them.  Therefore, we encode the VTYPE (SEW/LMUL) into these
intrinsic names.

Architecture spec: https://docs.google.com/document/d/1t7KkKILmmQ0mAgnDLj1s2ks-UXkMR9cWOHuIsewovPE/edit

Intrinsic spec: https://github.com/sifive/rvv-intrinsic-internal/blob/master/rvv_intrinsic_gen/overloaded_intrinsic_funcs/11_sifive_custom_vector_extension_functions.md#vcix-instructions-xsfvcp
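
To illustrate the naming, here is a sketch based on the new xsfvcp-x-rv64.ll
test added below (the function name is just an example): the SEW/LMUL pair
e64m1 is spelled directly in the intrinsic name, and the vl operand still
drives the vsetvli even though no vector value is produced or consumed.

    ;; Expected lowering (from the test CHECK lines):
    ;;   vsetvli zero, a1, e64, m1, ta, ma
    ;;   sf.vc.x 3, 31, 31, a0
    define void @example_sf_vc_x_se_e64m1(i64 %rs1, i64 %vl) {
    entry:
      tail call void @llvm.riscv.sf.vc.x.se.e64m1.i32.i64.i64(i32 3, i32 31, i32 31, i64 %rs1, i64 %vl)
      ret void
    }

    declare void @llvm.riscv.sf.vc.x.se.e64m1.i32.i64.i64(i32, i32, i32, i64, i64)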

Added: 
    llvm/test/CodeGen/RISCV/rvv/xsfvcp-x-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll
    llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll
    llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll
    llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 002920a2e194d..52ce0ccc65691 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -126,6 +126,13 @@ class RISCVVIntrinsic {
   bits<5> VLOperand = NoVLOperand;
 }
 
+class VCIXSuffix<string range> {
+  list<string> suffix = !cond(!eq(range, "c"): ["e8mf8", "e8mf4", "e8mf2", "e8m1", "e8m2", "e8m4", "e8m8"],
+                              !eq(range, "s"): ["e16mf4", "e16mf2", "e16m1", "e16m2", "e16m4", "e16m8"],
+                              !eq(range, "i"): ["e32mf2", "e32m1", "e32m2", "e32m4", "e32m8"],
+                              !eq(range, "l"): ["e64m1", "e64m2", "e64m4", "e64m8"]);
+}
+
 let TargetPrefix = "riscv" in {
   // We use anyint here but we only support XLen.
   def int_riscv_vsetvli   : Intrinsic<[llvm_anyint_ty],
@@ -415,6 +422,75 @@ let TargetPrefix = "riscv" in {
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
     let VLOperand = 4;
   }
+
+  // Output: (vector_out) or ()
+  // Input: (bit<27-26>, bit<24-20>, scalar_in, vl) or
+  //        (bit<27-26>, bit<24-20>, bit<11-7>, scalar_in, vl)
+  class RISCVSFCustomVC_X<bit HasDst, bit HasSE, bit ImmScalar>
+        : Intrinsic<!if(HasDst, [llvm_anyvector_ty], []),
+                    !listconcat(!if(HasDst, [llvm_i32_ty, llvm_i32_ty],
+                                            [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty]),
+                                [llvm_any_ty, llvm_anyint_ty]),
+                    !listconcat([IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>],    // bit<27-26> and bit<24-20>
+                                !if(HasDst, [], [ImmArg<ArgIndex<2>>]),                   // Vd or bit<11-7>
+                                !if(ImmScalar, !if(HasDst, [ImmArg<ArgIndex<2>>],
+                                                           [ImmArg<ArgIndex<3>>]), []),   // ScalarOperand
+                                !if(HasSE, [IntrHasSideEffects], []))>,
+          RISCVVIntrinsic {
+    let ScalarOperand = !cond(ImmScalar: NoScalarOperand,
+                              HasDst: 2,
+                              true: 3);
+    let VLOperand = !if(HasDst, 3, 4);
+  }
+  // Output: (vector_out) or ()
+  // Input: (bit<27-26>, vector_in, vector_in/scalar_in, vl) or
+  //        (bit<27-26>, bit<11-7>, vector_in, vector_in/scalar_in, vl)
+  class RISCVSFCustomVC_XV<bit HasDst, bit HasSE, bit ImmScalar>
+        : Intrinsic<!if(HasDst, [llvm_anyvector_ty], []),
+                    !listconcat(!if(HasDst, [llvm_i32_ty, LLVMMatchType<0>],
+                                            [llvm_i32_ty, llvm_i32_ty, llvm_anyvector_ty]),
+                                [llvm_any_ty, llvm_anyint_ty]),
+                    !listconcat([IntrNoMem, ImmArg<ArgIndex<0>>],                        // bit<27-26>
+                                !if(HasDst, [], [ImmArg<ArgIndex<1>>]),                  // Vd or bit<11-7>
+                                !if(ImmScalar, !if(HasDst, [ImmArg<ArgIndex<2>>],
+                                                           [ImmArg<ArgIndex<3>>]), []),  // ScalarOperand
+                                !if(HasSE, [IntrHasSideEffects], []))>,
+          RISCVVIntrinsic {
+    let ScalarOperand = !cond(ImmScalar: NoScalarOperand,
+                              HasDst: 2,
+                              true: 3);
+    let VLOperand = !if(HasDst, 3, 4);
+  }
+  // Output: (vector_out) or ()
+  // Input: (bit<27-26>, passthru, vector_in, vector_in/scalar_in, vl) or
+  //        (bit<27-26>, vector_in, vector_in, vector_in/scalar_in, vl)
+  class RISCVSFCustomVC_XVV<bit HasDst, bit HasSE, bit ImmScalar>
+        : Intrinsic<!if(HasDst, [llvm_anyvector_ty], []),
+                    !listconcat(!if(HasDst, [llvm_i32_ty, LLVMMatchType<0>, LLVMMatchType<0>],
+                                            [llvm_i32_ty, llvm_anyvector_ty, LLVMMatchType<0>]),
+                                [llvm_any_ty, llvm_anyint_ty]),
+                    !listconcat([IntrNoMem, ImmArg<ArgIndex<0>>],                        // bit<27-26>
+                                !if(ImmScalar, [ImmArg<ArgIndex<3>>], []),               // ScalarOperand
+                                !if(HasSE, [IntrHasSideEffects], []))>,
+          RISCVVIntrinsic {
+    let ScalarOperand = !if(ImmScalar, NoScalarOperand, 3);
+    let VLOperand = 4;
+  }
+  // Output: (wvector_out) or ()
+  // Input: (bit<27-26>, passthru, vector_in, vector_in/scalar_in, vl) or
+  //        (bit<27-26>, wvector_in, vector_in, vector_in/scalar_in, vl)
+  class RISCVSFCustomVC_XVW<bit HasDst, bit HasSE, bit ImmScalar>
+        : Intrinsic<!if(HasDst, [llvm_anyvector_ty], []),
+                    !listconcat(!if(HasDst, [llvm_i32_ty, LLVMMatchType<0>, llvm_anyvector_ty],
+                                            [llvm_i32_ty, llvm_anyvector_ty, llvm_anyvector_ty]),
+                                [llvm_any_ty, llvm_anyint_ty]),
+                    !listconcat([IntrNoMem, ImmArg<ArgIndex<0>>],                        // bit<27-26>
+                                !if(ImmScalar, [ImmArg<ArgIndex<3>>], []),               // ScalarOperand
+                                !if(HasSE, [IntrHasSideEffects], []))>,
+          RISCVVIntrinsic {
+    let ScalarOperand = !if(ImmScalar, NoScalarOperand, 3);
+    let VLOperand = 4;
+  }
   // For destination vector type is the same as first source vector.
   // Input: (passthru, vector_in, vector_in/scalar_in, vl)
   class RISCVBinaryAAXUnMasked
@@ -1492,6 +1568,51 @@ let TargetPrefix = "riscv" in {
                       [NoCapture<ArgIndex<0>>, IntrReadMem]>;
   }
 
+  multiclass RISCVSFCustomVC_X<list<string> type> {
+    foreach t = type in {
+      defvar ImmScalar = !eq(t, "i");
+      defvar range = ["c", "s", "i", "l"];
+      foreach r = range in {
+        foreach s = VCIXSuffix<r>.suffix in {
+          def "int_riscv_sf_vc_" # t # "_se_" # s : RISCVSFCustomVC_X</*HasDst*/0, /*HasSE*/1, ImmScalar>;
+        }
+      }
+      def "int_riscv_sf_vc_v_" # t # "_se" : RISCVSFCustomVC_X</*HasDst*/1, /*HasSE*/1, ImmScalar>;
+      def "int_riscv_sf_vc_v_" # t         : RISCVSFCustomVC_X</*HasDst*/1, /*HasSE*/0, ImmScalar>;
+    }
+  }
+
+  multiclass RISCVSFCustomVC_XV<list<string> type> {
+    foreach t = type in {
+      defvar ImmScalar = !eq(t, "i");
+      def "int_riscv_sf_vc_" # t # "v_se"   : RISCVSFCustomVC_XV</*HasDst*/0, /*HasSE*/1, ImmScalar>;
+      def "int_riscv_sf_vc_v_" # t # "v_se" : RISCVSFCustomVC_XV</*HasDst*/1, /*HasSE*/1, ImmScalar>;
+      def "int_riscv_sf_vc_v_" # t # "v"    : RISCVSFCustomVC_XV</*HasDst*/1, /*HasSE*/0, ImmScalar>;
+    }
+  }
+
+  multiclass RISCVSFCustomVC_XVV<list<string> type> {
+    foreach t = type in {
+      defvar ImmScalar = !eq(t, "i");
+      def "int_riscv_sf_vc_" # t # "vv_se"   : RISCVSFCustomVC_XVV</*HasDst*/0, /*HasSE*/1, ImmScalar>;
+      def "int_riscv_sf_vc_v_" # t # "vv_se" : RISCVSFCustomVC_XVV</*HasDst*/1, /*HasSE*/1, ImmScalar>;
+      def "int_riscv_sf_vc_v_" # t # "vv"    : RISCVSFCustomVC_XVV</*HasDst*/1, /*HasSE*/0, ImmScalar>;
+    }
+  }
+
+  multiclass RISCVSFCustomVC_XVW<list<string> type> {
+    foreach t = type in {
+      defvar ImmScalar = !eq(t, "i");
+      def "int_riscv_sf_vc_" # t # "vw_se"   : RISCVSFCustomVC_XVW</*HasDst*/0, /*HasSE*/1, ImmScalar>;
+      def "int_riscv_sf_vc_v_" # t # "vw_se" : RISCVSFCustomVC_XVW</*HasDst*/1, /*HasSE*/1, ImmScalar>;
+      def "int_riscv_sf_vc_v_" # t # "vw"    : RISCVSFCustomVC_XVW</*HasDst*/1, /*HasSE*/0, ImmScalar>;
+    }
+  }
+
+  defm "" : RISCVSFCustomVC_X<["x", "i"]>;
+  defm "" : RISCVSFCustomVC_XV<["x", "i", "v", "f"]>;
+  defm "" : RISCVSFCustomVC_XVV<["x", "i", "v", "f"]>;
+  defm "" : RISCVSFCustomVC_XVW<["x", "i", "v", "f"]>;
 } // TargetPrefix = "riscv"
 
 //===----------------------------------------------------------------------===//

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index edf4ae06977d6..03c3419bcb892 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -462,6 +462,12 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     setOperationAction({ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_VOID},
                        MVT::Other, Custom);
 
+    if (Subtarget.hasXsfvcpInstructions()) {
+      setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
+      setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
+      if (Subtarget.is64Bit())
+        setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
+    }
     static const unsigned IntegerVPOps[] = {
         ISD::VP_ADD,         ISD::VP_SUB,         ISD::VP_MUL,
         ISD::VP_SDIV,        ISD::VP_UDIV,        ISD::VP_SREM,
@@ -5131,19 +5137,64 @@ SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
 }
 
+static bool VCIXScalarNeedLegalization(unsigned IntNo) {
+  switch (IntNo) {
+  default:
+    break; // Don't need to promote the scalar.
+  case Intrinsic::riscv_sf_vc_v_x:
+  case Intrinsic::riscv_sf_vc_v_xv:
+  case Intrinsic::riscv_sf_vc_v_xvv:
+  case Intrinsic::riscv_sf_vc_v_xvw:
+  case Intrinsic::riscv_sf_vc_v_x_se:
+  case Intrinsic::riscv_sf_vc_v_xv_se:
+  case Intrinsic::riscv_sf_vc_v_xvv_se:
+  case Intrinsic::riscv_sf_vc_v_xvw_se:
+  case Intrinsic::riscv_sf_vc_xv_se:
+  case Intrinsic::riscv_sf_vc_xvv_se:
+  case Intrinsic::riscv_sf_vc_xvw_se:
+  case Intrinsic::riscv_sf_vc_x_se_e8mf8:
+  case Intrinsic::riscv_sf_vc_x_se_e8mf4:
+  case Intrinsic::riscv_sf_vc_x_se_e8mf2:
+  case Intrinsic::riscv_sf_vc_x_se_e8m1:
+  case Intrinsic::riscv_sf_vc_x_se_e8m2:
+  case Intrinsic::riscv_sf_vc_x_se_e8m4:
+  case Intrinsic::riscv_sf_vc_x_se_e8m8:
+  case Intrinsic::riscv_sf_vc_x_se_e16mf4:
+  case Intrinsic::riscv_sf_vc_x_se_e16mf2:
+  case Intrinsic::riscv_sf_vc_x_se_e16m1:
+  case Intrinsic::riscv_sf_vc_x_se_e16m2:
+  case Intrinsic::riscv_sf_vc_x_se_e16m4:
+  case Intrinsic::riscv_sf_vc_x_se_e16m8:
+  case Intrinsic::riscv_sf_vc_x_se_e32mf2:
+  case Intrinsic::riscv_sf_vc_x_se_e32m1:
+  case Intrinsic::riscv_sf_vc_x_se_e32m2:
+  case Intrinsic::riscv_sf_vc_x_se_e32m4:
+  case Intrinsic::riscv_sf_vc_x_se_e32m8:
+  case Intrinsic::riscv_sf_vc_x_se_e64m1:
+  case Intrinsic::riscv_sf_vc_x_se_e64m2:
+  case Intrinsic::riscv_sf_vc_x_se_e64m4:
+  case Intrinsic::riscv_sf_vc_x_se_e64m8:
+    return true;
+  }
+  return false;
+}
+
 // Some RVV intrinsics may claim that they want an integer operand to be
 // promoted or expanded.
 static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
                                            const RISCVSubtarget &Subtarget) {
-  assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
+  assert((Op.getOpcode() == ISD::INTRINSIC_VOID ||
+          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
          "Unexpected opcode");
 
-  if (!Subtarget.hasVInstructions())
-    return SDValue();
-
-  bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
+  bool HasChain = (Op.getOpcode() == ISD::INTRINSIC_VOID ||
+                   Op.getOpcode() == ISD::INTRINSIC_W_CHAIN);
   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
+  bool VCIX = VCIXScalarNeedLegalization(IntNo);
+
+  if (!VCIX && !Subtarget.hasVInstructions())
+    return SDValue();
   SDLoc DL(Op);
 
   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
@@ -5163,6 +5214,11 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
     return SDValue();
 
+  // Assert when VCIX scalar operand is larger than XLenVT.
+  if (VCIX)
+    assert(!XLenVT.bitsLT(OpVT) &&
+           "Unexpected scalar operand since SEW > XLenVT!");
+
   // Simplest case is that the operand needs to be promoted to XLenVT.
   if (OpVT.bitsLT(XLenVT)) {
     // If the operand is a constant, sign extend to increase our chances
@@ -5175,6 +5231,10 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
   }
 
+  // Don't need to do the further promotion for VCIX.
+  if (VCIX)
+    return SDValue();
+
   // Use the previous operand to get the vXi64 VT. The result might be a mask
   // VT for compares. Using the previous operand assumes that the previous
   // operand will never have a smaller element size than a scalar operand and
@@ -5560,6 +5620,8 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
   }
   }
 
+  return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
+
   return SDValue();
 }
 

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 06169022a0fa5..942545beb8962 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -68,7 +68,8 @@ class PseudoToVInst<string PseudoInst> {
                  !subst("F16", "F",
                  !subst("F32", "F",
                  !subst("F64", "F",
-                 !subst("Pseudo", "", PseudoInst)))))))))))))))))))));
+                 !subst("_SE", "",
+                 !subst("Pseudo", "", PseudoInst))))))))))))))))))))));
 }
 
 // This class describes information associated to the LMUL.
@@ -1599,6 +1600,134 @@ class VPseudoISegStoreMask<VReg ValClass, VReg IdxClass, int EEW, bits<3> LMUL,
   let HasSEWOp = 1;
 }
 
+class VPseudoVC_X<Operand OpClass, DAGOperand RS1Class,
+                  bit HasSideEffect = 1> :
+      Pseudo<(outs),
+             (ins OpClass:$op1, payload5:$rs2, payload5:$rd, RS1Class:$r1,
+                  AVL:$vl, ixlenimm:$sew), []>,
+      RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = 1;
+  let hasSideEffects = HasSideEffect;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoVC_XV<Operand OpClass, VReg RS2Class, DAGOperand RS1Class,
+                   bit HasSideEffect = 1> :
+      Pseudo<(outs),
+             (ins OpClass:$op1, payload5:$rd, RS2Class:$rs2, RS1Class:$r1,
+                  AVL:$vl, ixlenimm:$sew), []>,
+      RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = 1;
+  let hasSideEffects = HasSideEffect;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoVC_XVV<Operand OpClass, VReg RDClass, VReg RS2Class,
+                    DAGOperand RS1Class, bit HasSideEffect = 1> :
+      Pseudo<(outs),
+             (ins OpClass:$op1, RDClass:$rd, RS2Class:$rs2, RS1Class:$r1,
+                  AVL:$vl, ixlenimm:$sew), []>,
+      RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = 1;
+  let hasSideEffects = HasSideEffect;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoVC_V_X<Operand OpClass, VReg RDClass, DAGOperand RS1Class,
+                    bit HasSideEffect = 1> :
+      Pseudo<(outs RDClass:$rd),
+             (ins OpClass:$op1, payload5:$rs2, RS1Class:$r1,
+                  AVL:$vl, ixlenimm:$sew), []>,
+      RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = 1;
+  let hasSideEffects = HasSideEffect;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoVC_V_XV<Operand OpClass, VReg RDClass, VReg RS2Class,
+                     DAGOperand RS1Class, bit HasSideEffect = 1> :
+      Pseudo<(outs RDClass:$rd),
+             (ins OpClass:$op1, RS2Class:$rs2, RS1Class:$r1,
+                  AVL:$vl, ixlenimm:$sew), []>,
+      RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = 1;
+  let hasSideEffects = HasSideEffect;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoVC_V_XVV<Operand OpClass, VReg RDClass, VReg RS2Class,
+                      DAGOperand RS1Class, bit HasSideEffect = 1> :
+      Pseudo<(outs RDClass:$rd),
+             (ins OpClass:$op1, RDClass:$rs3, RS2Class:$rs2, RS1Class:$r1,
+                  AVL:$vl, ixlenimm:$sew), []>,
+      RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = 1;
+  let hasSideEffects = HasSideEffect;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+multiclass VPseudoVC_X<LMULInfo m, DAGOperand RS1Class,
+                       Operand OpClass = payload2>{
+  let VLMul = m.value in {
+    def "PseudoVC_" # NAME # "_SE_" # m.MX : VPseudoVC_X<OpClass, RS1Class>;
+    def "PseudoVC_V_" # NAME # "_SE_" # m.MX : VPseudoVC_V_X<OpClass, m.vrclass, RS1Class>;
+    def "PseudoVC_V_" # NAME # "_" # m.MX : VPseudoVC_V_X<OpClass, m.vrclass, RS1Class, 0>;
+  }
+}
+
+multiclass VPseudoVC_XV<LMULInfo m, DAGOperand RS1Class,
+                        Operand OpClass = payload2>{
+  let VLMul = m.value in {
+    def "PseudoVC_" # NAME # "_SE_" # m.MX : VPseudoVC_XV<OpClass, m.vrclass, RS1Class>;
+    def "PseudoVC_V_" # NAME # "_SE_" # m.MX : VPseudoVC_V_XV<OpClass, m.vrclass, m.vrclass, RS1Class>;
+    def "PseudoVC_V_" # NAME # "_" # m.MX : VPseudoVC_V_XV<OpClass, m.vrclass, m.vrclass, RS1Class, 0>;
+  }
+}
+
+multiclass VPseudoVC_XVV<LMULInfo m, DAGOperand RS1Class,
+                         Operand OpClass = payload2>{
+  let VLMul = m.value in {
+    def "PseudoVC_" # NAME # "_SE_" # m.MX : VPseudoVC_XVV<OpClass, m.vrclass, m.vrclass, RS1Class>;
+    def "PseudoVC_V_" # NAME # "_SE_" # m.MX : VPseudoVC_V_XVV<OpClass, m.vrclass, m.vrclass, RS1Class>;
+    def "PseudoVC_V_" # NAME # "_" # m.MX : VPseudoVC_V_XVV<OpClass, m.vrclass, m.vrclass, RS1Class, 0>;
+  }
+}
+
+multiclass VPseudoVC_XVW<LMULInfo m, DAGOperand RS1Class,
+                         Operand OpClass = payload2>{
+  let VLMul = m.value in {
+    def "PseudoVC_" # NAME # "_SE_" # m.MX : VPseudoVC_XVV<OpClass, m.wvrclass, m.vrclass, RS1Class>;
+    let Constraints = "@earlyclobber $rd, $rd = $rs3" in {
+      def "PseudoVC_V_" # NAME # "_SE_" # m.MX : VPseudoVC_V_XVV<OpClass, m.wvrclass, m.vrclass, RS1Class>;
+      def "PseudoVC_V_" # NAME # "_" # m.MX : VPseudoVC_V_XVV<OpClass, m.wvrclass, m.vrclass, RS1Class, 0>;
+    }
+  }
+}
+
 multiclass VPseudoUSLoad {
   foreach eew = EEWList in {
     foreach lmul = MxSet<eew>.m in {
@@ -3700,6 +3829,73 @@ class VPatTernaryMaskPolicy<string intrinsic,
                     (mask_type V0),
                     GPR:$vl, sew, (XLenVT timm:$policy))>;
 
+class VPatVC_OP4<string intrinsic_name,
+                 string inst,
+                 ValueType op2_type,
+                 ValueType op3_type,
+                 ValueType op4_type,
+                 int sew,
+                 DAGOperand op2_kind,
+                 DAGOperand op3_kind,
+                 DAGOperand op4_kind,
+                 Operand op1_kind = payload2> :
+  Pat<(!cast<Intrinsic>(intrinsic_name)
+       (i32      op1_kind:$op1),
+       (op2_type op2_kind:$op2),
+       (op3_type op3_kind:$op3),
+       (op4_type op4_kind:$op4),
+       VLOpFrag),
+      (!cast<Instruction>(inst)
+       (i32      op1_kind:$op1),
+       (op2_type op2_kind:$op2),
+       (op3_type op3_kind:$op3),
+       (op4_type op4_kind:$op4),
+       GPR:$vl, sew)>;
+
+class VPatVC_V_OP4<string intrinsic_name,
+                   string inst,
+                   ValueType result_type,
+                   ValueType op2_type,
+                   ValueType op3_type,
+                   ValueType op4_type,
+                   int sew,
+                   DAGOperand op2_kind,
+                   DAGOperand op3_kind,
+                   DAGOperand op4_kind,
+                   Operand op1_kind = payload2> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
+                    (i32      op1_kind:$op1),
+                    (op2_type op2_kind:$op2),
+                    (op3_type op3_kind:$op3),
+                    (op4_type op4_kind:$op4),
+                    VLOpFrag)),
+                   (!cast<Instruction>(inst)
+                    (i32      op1_kind:$op1),
+                    (op2_type op2_kind:$op2),
+                    (op3_type op3_kind:$op3),
+                    (op4_type op4_kind:$op4),
+                    GPR:$vl, sew)>;
+
+class VPatVC_V_OP3<string intrinsic_name,
+                   string inst,
+                   ValueType result_type,
+                   ValueType op2_type,
+                   ValueType op3_type,
+                   int sew,
+                   DAGOperand op2_kind,
+                   DAGOperand op3_kind,
+                   Operand op1_kind = payload2> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
+                    (i32      op1_kind:$op1),
+                    (op2_type op2_kind:$op2),
+                    (op3_type op3_kind:$op3),
+                    VLOpFrag)),
+                   (!cast<Instruction>(inst)
+                    (i32      op1_kind:$op1),
+                    (op2_type op2_kind:$op2),
+                    (op3_type op3_kind:$op3),
+                    GPR:$vl, sew)>;
+
 multiclass VPatUnaryS_M<string intrinsic_name,
                              string inst>
 {
@@ -5387,6 +5583,37 @@ defm PseudoVRGATHEREI16 : VPseudoVGTR_VV_EEW</* eew */ 16, "@earlyclobber $rd">;
 //===----------------------------------------------------------------------===//
 defm PseudoVCOMPRESS : VPseudoVCPR_V;
 
+//===----------------------------------------------------------------------===//
+// SiFive Custom Instructions
+//===----------------------------------------------------------------------===//
+let Predicates = [HasExtXsfvcp] in {
+  foreach m = MxList in {
+    defm X : VPseudoVC_X<m, GPR>;
+    defm I : VPseudoVC_X<m, simm5_i32>;
+    defm XV : VPseudoVC_XV<m, GPR>;
+    defm IV : VPseudoVC_XV<m, simm5_i32>;
+    defm VV : VPseudoVC_XV<m, m.vrclass>;
+    defm XVV : VPseudoVC_XVV<m, GPR>;
+    defm IVV : VPseudoVC_XVV<m, simm5_i32>;
+    defm VVV : VPseudoVC_XVV<m, m.vrclass>;
+  }
+  foreach f = FPList in {
+    foreach m = f.MxList in {
+    defm f.FX # "V" : VPseudoVC_XV<m, f.fprclass, payload1>;
+    defm f.FX # "VV" : VPseudoVC_XVV<m, f.fprclass, payload1>;
+    }
+  }
+  foreach m = MxListW in {
+    defm XVW : VPseudoVC_XVW<m, GPR>;
+    defm IVW : VPseudoVC_XVW<m, simm5_i32>;
+    defm VVW : VPseudoVC_XVW<m, m.vrclass>;
+  }
+  foreach f = FPListW in {
+    foreach m = f.MxList in
+    defm f.FX # "VW" : VPseudoVC_XVW<m, f.fprclass, payload1>;
+  }
+}
+
 //===----------------------------------------------------------------------===//
 // Patterns.
 //===----------------------------------------------------------------------===//
@@ -5966,6 +6193,111 @@ let Predicates = [HasVInstructionsAnyF] in {
   defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>;
 } // Predicates = [HasVInstructionsAnyF]
 
+//===----------------------------------------------------------------------===//
+// SiFive Custom Instructions
+//===----------------------------------------------------------------------===//
+class GetFTypeInfo<int sew> {
+  ValueType Scalar = !cond(!eq(sew, 16): f16,
+                           !eq(sew, 32): f32,
+                           !eq(sew, 64): f64);
+  RegisterClass ScalarRegClass = !cond(!eq(sew, 16): FPR16,
+                                       !eq(sew, 32): FPR32,
+                                       !eq(sew, 64): FPR64);
+}
+
+class GetLMulLowerMX<string MX> {
+  string mx = !cond(!eq(MX, "MF8"): "mf8",
+                    !eq(MX, "MF4"): "mf4",
+                    !eq(MX, "MF2"): "mf2",
+                    !eq(MX, "M1") : "m1",
+                    !eq(MX, "M2") : "m2",
+                    !eq(MX, "M4") : "m4",
+                    !eq(MX, "M8") : "m8");
+}
+
+multiclass VPatVC_X<string intrinsic_suffix, string instruction_suffix,
+                    VTypeInfo vti, ValueType type, DAGOperand kind> {
+  def : VPatVC_OP4<"int_riscv_sf_vc_" # intrinsic_suffix # "_se_e" # vti.SEW # GetLMulLowerMX<vti.LMul.MX>.mx,
+                   "PseudoVC_" # instruction_suffix # "_SE_" # vti.LMul.MX,
+                   i32, i32, type, vti.Log2SEW,
+                   payload5, payload5, kind>;
+  def : VPatVC_V_OP3<"int_riscv_sf_vc_v_" # intrinsic_suffix # "_se",
+                     "PseudoVC_V_" # instruction_suffix # "_SE_" # vti.LMul.MX,
+                     vti.Vector, i32, type, vti.Log2SEW,
+                     payload5, kind>;
+  def : VPatVC_V_OP3<"int_riscv_sf_vc_v_" # intrinsic_suffix,
+                     "PseudoVC_V_" # instruction_suffix # "_" # vti.LMul.MX,
+                     vti.Vector, i32, type, vti.Log2SEW,
+                     payload5, kind>;
+}
+
+multiclass VPatVC_XV<string intrinsic_suffix, string instruction_suffix,
+                     VTypeInfo vti, ValueType type, DAGOperand kind,
+                     Operand op1_kind = payload2> {
+  def : VPatVC_OP4<"int_riscv_sf_vc_" # intrinsic_suffix # "_se",
+                   "PseudoVC_" # instruction_suffix # "_SE_" # vti.LMul.MX,
+                   i32, vti.Vector, type, vti.Log2SEW,
+                   payload5, vti.RegClass, kind, op1_kind>;
+  def : VPatVC_V_OP3<"int_riscv_sf_vc_v_" # intrinsic_suffix # "_se",
+                     "PseudoVC_V_" # instruction_suffix # "_SE_" # vti.LMul.MX,
+                     vti.Vector, vti.Vector, type, vti.Log2SEW,
+                     vti.RegClass, kind, op1_kind>;
+  def : VPatVC_V_OP3<"int_riscv_sf_vc_v_" # intrinsic_suffix,
+                     "PseudoVC_V_" # instruction_suffix # "_" # vti.LMul.MX,
+                     vti.Vector, vti.Vector, type, vti.Log2SEW,
+                     vti.RegClass, kind, op1_kind>;
+}
+
+multiclass VPatVC_XVV<string intrinsic_suffix, string instruction_suffix,
+                      VTypeInfo wti, VTypeInfo vti, ValueType type, DAGOperand kind,
+                      Operand op1_kind = payload2> {
+  def : VPatVC_OP4<"int_riscv_sf_vc_" # intrinsic_suffix # "_se",
+                   "PseudoVC_" # instruction_suffix # "_SE_" # vti.LMul.MX,
+                   wti.Vector, vti.Vector, type, vti.Log2SEW,
+                   wti.RegClass, vti.RegClass, kind, op1_kind>;
+  def : VPatVC_V_OP4<"int_riscv_sf_vc_v_" # intrinsic_suffix # "_se",
+                     "PseudoVC_V_" # instruction_suffix # "_SE_" # vti.LMul.MX,
+                     wti.Vector, wti.Vector, vti.Vector, type, vti.Log2SEW,
+                     wti.RegClass, vti.RegClass, kind, op1_kind>;
+  def : VPatVC_V_OP4<"int_riscv_sf_vc_v_" # intrinsic_suffix,
+                     "PseudoVC_V_" # instruction_suffix # "_" # vti.LMul.MX,
+                     wti.Vector, wti.Vector, vti.Vector, type, vti.Log2SEW,
+                     wti.RegClass, vti.RegClass, kind, op1_kind>;
+}
+
+let Predicates = [HasExtXsfvcp] in {
+  foreach vti = AllIntegerVectors in {
+    defm : VPatVC_X<"x", "X", vti, vti.Scalar, vti.ScalarRegClass>;
+    defm : VPatVC_X<"i", "I", vti, i32, simm5_i32>;
+    defm : VPatVC_XV<"xv", "XV", vti, vti.Scalar, vti.ScalarRegClass>;
+    defm : VPatVC_XV<"iv", "IV", vti, i32, simm5_i32>;
+    defm : VPatVC_XV<"vv", "VV", vti, vti.Vector, vti.RegClass>;
+    defm : VPatVC_XVV<"xvv", "XVV", vti, vti, vti.Scalar, vti.ScalarRegClass>;
+    defm : VPatVC_XVV<"ivv", "IVV", vti, vti, i32, simm5_i32>;
+    defm : VPatVC_XVV<"vvv", "VVV", vti, vti, vti.Vector, vti.RegClass>;
+    if !ge(vti.SEW, 16) then {
+    defm : VPatVC_XV<"fv", "F" # vti.SEW # "V", vti,
+                     GetFTypeInfo<vti.SEW>.Scalar,
+                     GetFTypeInfo<vti.SEW>.ScalarRegClass, payload1>;
+    defm : VPatVC_XVV<"fvv", "F" # vti.SEW # "VV", vti, vti,
+                      GetFTypeInfo<vti.SEW>.Scalar,
+                      GetFTypeInfo<vti.SEW>.ScalarRegClass, payload1>;
+    }
+  }
+  foreach VtiToWti = AllWidenableIntVectors in {
+    defvar vti = VtiToWti.Vti;
+    defvar wti = VtiToWti.Wti;
+    defm : VPatVC_XVV<"xvw", "XVW", wti, vti, vti.Scalar, vti.ScalarRegClass>;
+    defm : VPatVC_XVV<"ivw", "IVW", wti, vti, i32, simm5_i32>;
+    defm : VPatVC_XVV<"vvw", "VVW", wti, vti, vti.Vector, vti.RegClass>;
+    if !ge(vti.SEW, 16) then {
+    defm : VPatVC_XVV<"fvw", "F" # vti.SEW # "VW", wti, vti,
+                      GetFTypeInfo<vti.SEW>.Scalar,
+                      GetFTypeInfo<vti.SEW>.ScalarRegClass, payload1>;
+    }
+  }
+}
+
 // Include the non-intrinsic ISel patterns
 include "RISCVInstrInfoVVLPatterns.td"
 include "RISCVInstrInfoVSDPatterns.td"

diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x-rv64.ll
new file mode 100644
index 0000000000000..0698b458e7ca2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x-rv64.ll
@@ -0,0 +1,159 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN:  llc -mtriple=riscv64 -mattr=+v,+xsfvcp \
+; RUN:    -verify-machineinstrs < %s | FileCheck %s
+
+define void @test_sf_vc_x_se_e64m1(i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.x.se.e64m1.i32.i64.i64(i32 3, i32 31, i32 31, i64 %rs1, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e64m1.i32.i64.i64(i32, i32, i32, i64, i64)
+
+define void @test_sf_vc_x_se_e64m2(i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.x.se.e64m2.i32.i64.i64(i32 3, i32 31, i32 31, i64 %rs1, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e64m2.i32.i64.i64(i32, i32, i32, i64, i64)
+
+define void @test_sf_vc_x_se_e64m4(i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.x.se.e64m4.i32.i64.i64(i32 3, i32 31, i32 31, i64 %rs1, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e64m4.i32.i64.i64(i32, i32, i32, i64, i64)
+
+define void @test_sf_vc_x_se_e64m8(i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.x.se.e64m8.i32.i64.i64(i32 3, i32 31, i32 31, i64 %rs1, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e64m8.i32.i64.i64(i32, i32, i32, i64, i64)
+
+define <vscale x 1 x i64> @test_sf_vc_v_x_se_e64m1(i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.x.se.nxv1i64.i32.i64.i64(i32 3, i32 31, i64 %rs1, i64 %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.x.se.nxv1i64.i32.i64.i64(i32, i32, i64, i64)
+
+define <vscale x 2 x i64> @test_sf_vc_v_x_se_e64m2(i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.x.se.nxv2i64.i32.i64.i64(i32 3, i32 31, i64 %rs1, i64 %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.x.se.nxv2i64.i32.i64.i64(i32, i32, i64, i64)
+
+define <vscale x 4 x i64> @test_sf_vc_v_x_se_e64m4(i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.x.se.nxv4i64.i32.i64.i64(i32 3, i32 31, i64 %rs1, i64 %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.x.se.nxv4i64.i32.i64.i64(i32, i32, i64, i64)
+
+define <vscale x 8 x i64> @test_sf_vc_v_x_se_e64m8(i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.x.se.nxv8i64.i32.i64.i64(i32 3, i32 31, i64 %rs1, i64 %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.x.se.nxv8i64.i32.i64.i64(i32, i32, i64, i64)
+
+define <vscale x 1 x i64> @test_sf_vc_v_x_e64m1(i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.x.nxv1i64.i32.i64.i64(i32 3, i32 31, i64 %rs1, i64 %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.x.nxv1i64.i32.i64.i64(i32, i32, i64, i64)
+
+define <vscale x 2 x i64> @test_sf_vc_v_x_e64m2(i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.x.nxv2i64.i32.i64.i64(i32 3, i32 31, i64 %rs1, i64 %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.x.nxv2i64.i32.i64.i64(i32, i32, i64, i64)
+
+define <vscale x 4 x i64> @test_sf_vc_v_x_e64m4(i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.x.nxv4i64.i32.i64.i64(i32 3, i32 31, i64 %rs1, i64 %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.x.nxv4i64.i32.i64.i64(i32, i32, i64, i64)
+
+define <vscale x 8 x i64> @test_sf_vc_v_x_e64m8(i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.x.nxv8i64.i32.i64.i64(i32 3, i32 31, i64 %rs1, i64 %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.x.nxv8i64.i32.i64.i64(i32, i32, i64, i64)

diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll
new file mode 100644
index 0000000000000..689654e4c26ba
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll
@@ -0,0 +1,1565 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN:  sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+xsfvcp \
+; RUN:    -verify-machineinstrs | FileCheck %s
+; RUN:  sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvcp \
+; RUN:    -verify-machineinstrs | FileCheck %s
+
+define void @test_sf_vc_x_se_e8mf8(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.x.se.e8mf8.i32.i8.iXLen(i32 3, i32 31, i32 31, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e8mf8.i32.i8.iXLen(i32, i32, i32, i8, iXLen)
+
+define void @test_sf_vc_x_se_e8mf4(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.x.se.e8mf4.i32.i8.iXLen(i32 3, i32 31, i32 31, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e8mf4.i32.i8.iXLen(i32, i32, i32, i8, iXLen)
+
+define void @test_sf_vc_x_se_e8mf2(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.x.se.e8mf2.i32.i8.iXLen(i32 3, i32 31, i32 31, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e8mf2.i32.i8.iXLen(i32, i32, i32, i8, iXLen)
+
+define void @test_sf_vc_x_se_e8m1(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.x.se.e8m1.i32.i8.iXLen(i32 3, i32 31, i32 31, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e8m1.i32.i8.iXLen(i32, i32, i32, i8, iXLen)
+
+define void @test_sf_vc_x_se_e8m2(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.x.se.e8m2.i32.i8.iXLen(i32 3, i32 31, i32 31, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e8m2.i32.i8.iXLen(i32, i32, i32, i8, iXLen)
+
+define void @test_sf_vc_x_se_e8m4(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.x.se.e8m4.i32.i8.iXLen(i32 3, i32 31, i32 31, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e8m4.i32.i8.iXLen(i32, i32, i32, i8, iXLen)
+
+define void @test_sf_vc_x_se_e8m8(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.x.se.e8m8.i32.i8.iXLen(i32 3, i32 31, i32 31, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e8m8.i32.i8.iXLen(i32, i32, i32, i8, iXLen)
+
+define void @test_sf_vc_x_se_e16mf4(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.x.se.e16mf4.i32.i16.iXLen(i32 3, i32 31, i32 31, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e16mf4.i32.i16.iXLen(i32, i32, i32, i16, iXLen)
+
+define void @test_sf_vc_x_se_e16mf2(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.x.se.e16mf2.i32.i16.iXLen(i32 3, i32 31, i32 31, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e16mf2.i32.i16.iXLen(i32, i32, i32, i16, iXLen)
+
+define void @test_sf_vc_x_se_e16m1(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.x.se.e16m1.i32.i16.iXLen(i32 3, i32 31, i32 31, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e16m1.i32.i16.iXLen(i32, i32, i32, i16, iXLen)
+
+define void @test_sf_vc_x_se_e16m2(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.x.se.e16m2.i32.i16.iXLen(i32 3, i32 31, i32 31, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e16m2.i32.i16.iXLen(i32, i32, i32, i16, iXLen)
+
+define void @test_sf_vc_x_se_e16m4(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.x.se.e16m4.i32.i16.iXLen(i32 3, i32 31, i32 31, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e16m4.i32.i16.iXLen(i32, i32, i32, i16, iXLen)
+
+define void @test_sf_vc_x_se_e16m8(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.x.se.e16m8.i32.i16.iXLen(i32 3, i32 31, i32 31, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e16m8.i32.i16.iXLen(i32, i32, i32, i16, iXLen)
+
+define void @test_sf_vc_x_se_e32mf2(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.x.se.e32mf2.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e32mf2.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define void @test_sf_vc_x_se_e32m1(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.x.se.e32m1.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e32m1.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define void @test_sf_vc_x_se_e32m2(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.x.se.e32m2.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e32m2.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define void @test_sf_vc_x_se_e32m4(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.x.se.e32m4.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e32m4.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define void @test_sf_vc_x_se_e32m8(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.x.se.e32m8.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e32m8.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_x_se_e8mf8(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.x.se.nxv1i8.i32.i8.iXLen(i32 3, i32 31, i8 %rs1, iXLen %vl)
+  ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.x.se.nxv1i8.i32.i8.iXLen(i32, i32, i8, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_x_se_e8mf4(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.x.se.nxv2i8.i32.i8.iXLen(i32 3, i32 31, i8 %rs1, iXLen %vl)
+  ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.x.se.nxv2i8.i32.i8.iXLen(i32, i32, i8, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_x_se_e8mf2(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.x.se.nxv4i8.i32.i8.iXLen(i32 3, i32 31, i8 %rs1, iXLen %vl)
+  ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.x.se.nxv4i8.i32.i8.iXLen(i32, i32, i8, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_x_se_e8m1(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.x.se.nxv8i8.i32.i8.iXLen(i32 3, i32 31, i8 %rs1, iXLen %vl)
+  ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.x.se.nxv8i8.i32.i8.iXLen(i32, i32, i8, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_x_se_e8m2(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.x.se.nxv16i8.i32.i8.iXLen(i32 3, i32 31, i8 %rs1, iXLen %vl)
+  ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.x.se.nxv16i8.i32.i8.iXLen(i32, i32, i8, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_x_se_e8m4(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.x.se.nxv32i8.i32.i8.iXLen(i32 3, i32 31, i8 %rs1, iXLen %vl)
+  ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.x.se.nxv32i8.i32.i8.iXLen(i32, i32, i8, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_x_se_e8m8(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.x.se.nxv64i8.i32.i8.iXLen(i32 3, i32 31, i8 %rs1, iXLen %vl)
+  ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.x.se.nxv64i8.i32.i8.iXLen(i32, i32, i8, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_x_se_e16mf4(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.x.se.nxv1i16.i32.i16.iXLen(i32 3, i32 31, i16 %rs1, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.x.se.nxv1i16.i32.i16.iXLen(i32, i32, i16, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_x_se_e16mf2(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.x.se.nxv2i16.i32.i16.iXLen(i32 3, i32 31, i16 %rs1, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.x.se.nxv2i16.i32.i16.iXLen(i32, i32, i16, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_x_se_e16m1(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.x.se.nxv4i16.i32.i16.iXLen(i32 3, i32 31, i16 %rs1, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.x.se.nxv4i16.i32.i16.iXLen(i32, i32, i16, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_x_se_e16m2(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.x.se.nxv8i16.i32.i16.iXLen(i32 3, i32 31, i16 %rs1, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.x.se.nxv8i16.i32.i16.iXLen(i32, i32, i16, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_x_se_e16m4(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.x.se.nxv16i16.i32.i16.iXLen(i32 3, i32 31, i16 %rs1, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.x.se.nxv16i16.i32.i16.iXLen(i32, i32, i16, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_x_se_e16m8(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.x.se.nxv32i16.i32.i16.iXLen(i32 3, i32 31, i16 %rs1, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.x.se.nxv32i16.i32.i16.iXLen(i32, i32, i16, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_x_se_e32mf2(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.x.se.nxv1i32.i32.i32.iXLen(i32 3, i32 31, i32 %rs1, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.x.se.nxv1i32.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_x_se_e32m1(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.x.se.nxv2i32.i32.i32.iXLen(i32 3, i32 31, i32 %rs1, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.x.se.nxv2i32.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_x_se_e32m2(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.x.se.nxv4i32.i32.i32.iXLen(i32 3, i32 31, i32 %rs1, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.x.se.nxv4i32.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_x_se_e32m4(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.x.se.nxv8i32.i32.i32.iXLen(i32 3, i32 31, i32 %rs1, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.x.se.nxv8i32.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_x_se_e32m8(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.x.se.nxv16i32.i32.i32.iXLen(i32 3, i32 31, i32 %rs1, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.x.se.nxv16i32.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_x_e8mf8(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.x.nxv1i8.i32.i8.iXLen(i32 3, i32 31, i8 %rs1, iXLen %vl)
+  ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.x.nxv1i8.i32.i8.iXLen(i32, i32, i8, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_x_e8mf4(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.x.nxv2i8.i32.i8.iXLen(i32 3, i32 31, i8 %rs1, iXLen %vl)
+  ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.x.nxv2i8.i32.i8.iXLen(i32, i32, i8, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_x_e8mf2(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.x.nxv4i8.i32.i8.iXLen(i32 3, i32 31, i8 %rs1, iXLen %vl)
+  ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.x.nxv4i8.i32.i8.iXLen(i32, i32, i8, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_x_e8m1(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.x.nxv8i8.i32.i8.iXLen(i32 3, i32 31, i8 %rs1, iXLen %vl)
+  ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.x.nxv8i8.i32.i8.iXLen(i32, i32, i8, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_x_e8m2(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.x.nxv16i8.i32.i8.iXLen(i32 3, i32 31, i8 %rs1, iXLen %vl)
+  ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.x.nxv16i8.i32.i8.iXLen(i32, i32, i8, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_x_e8m4(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.x.nxv32i8.i32.i8.iXLen(i32 3, i32 31, i8 %rs1, iXLen %vl)
+  ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.x.nxv32i8.i32.i8.iXLen(i32, i32, i8, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_x_e8m8(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.x.nxv64i8.i32.i8.iXLen(i32 3, i32 31, i8 %rs1, iXLen %vl)
+  ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.x.nxv64i8.i32.i8.iXLen(i32, i32, i8, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_x_e16mf4(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.x.nxv1i16.i32.i16.iXLen(i32 3, i32 31, i16 %rs1, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.x.nxv1i16.i32.i16.iXLen(i32, i32, i16, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_x_e16mf2(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.x.nxv2i16.i32.i16.iXLen(i32 3, i32 31, i16 %rs1, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.x.nxv2i16.i32.i16.iXLen(i32, i32, i16, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_x_e16m1(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.x.nxv4i16.i32.i16.iXLen(i32 3, i32 31, i16 %rs1, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.x.nxv4i16.i32.i16.iXLen(i32, i32, i16, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_x_e16m2(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.x.nxv8i16.i32.i16.iXLen(i32 3, i32 31, i16 %rs1, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.x.nxv8i16.i32.i16.iXLen(i32, i32, i16, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_x_e16m4(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.x.nxv16i16.i32.i16.iXLen(i32 3, i32 31, i16 %rs1, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.x.nxv16i16.i32.i16.iXLen(i32, i32, i16, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_x_e16m8(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.x.nxv32i16.i32.i16.iXLen(i32 3, i32 31, i16 %rs1, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.x.nxv32i16.i32.i16.iXLen(i32, i32, i16, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_x_e32mf2(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.x.nxv1i32.i32.i32.iXLen(i32 3, i32 31, i32 %rs1, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.x.nxv1i32.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_x_e32m1(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.x.nxv2i32.i32.i32.iXLen(i32 3, i32 31, i32 %rs1, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.x.nxv2i32.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_x_e32m2(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.x.nxv4i32.i32.i32.iXLen(i32 3, i32 31, i32 %rs1, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.x.nxv4i32.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_x_e32m4(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.x.nxv8i32.i32.i32.iXLen(i32 3, i32 31, i32 %rs1, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.x.nxv8i32.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_x_e32m8(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.x.nxv16i32.i32.i32.iXLen(i32 3, i32 31, i32 %rs1, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.x.nxv16i32.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define void @test_sf_vc_i_se_e8mf8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.i.se.e8mf8.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e8mf8.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define void @test_sf_vc_i_se_e8mf4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.i.se.e8mf4.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e8mf4.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define void @test_sf_vc_i_se_e8mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.i.se.e8mf2.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e8mf2.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define void @test_sf_vc_i_se_e8m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.i.se.e8m1.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e8m1.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define void @test_sf_vc_i_se_e8m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.i.se.e8m2.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e8m2.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define void @test_sf_vc_i_se_e8m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.i.se.e8m4.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e8m4.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define void @test_sf_vc_i_se_e8m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.i.se.e8m8.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e8m8.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define void @test_sf_vc_i_se_e16mf4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.i.se.e16mf4.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e16mf4.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define void @test_sf_vc_i_se_e16mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.i.se.e16mf2.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e16mf2.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define void @test_sf_vc_i_se_e16m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.i.se.e16m1.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e16m1.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define void @test_sf_vc_i_se_e16m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.i.se.e16m2.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e16m2.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define void @test_sf_vc_i_se_e16m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.i.se.e16m4.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e16m4.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define void @test_sf_vc_i_se_e16m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.i.se.e16m8.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e16m8.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define void @test_sf_vc_i_se_e32mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.i.se.e32mf2.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e32mf2.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define void @test_sf_vc_i_se_e32m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.i.se.e32m1.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e32m1.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define void @test_sf_vc_i_se_e32m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.i.se.e32m2.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e32m2.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define void @test_sf_vc_i_se_e32m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.i.se.e32m4.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e32m4.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define void @test_sf_vc_i_se_e32m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.i.se.e32m8.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e32m8.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define void @test_sf_vc_i_se_e64m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.i.se.e64m1.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e64m1.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define void @test_sf_vc_i_se_e64m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.i.se.e64m2.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e64m2.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define void @test_sf_vc_i_se_e64m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.i.se.e64m4.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e64m4.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define void @test_sf_vc_i_se_e64m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.i.se.e64m8.i32.i32.iXLen(i32 3, i32 31, i32 31, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e64m8.i32.i32.iXLen(i32, i32, i32, i32, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_i_se_e8mf8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.i.se.nxv1i8.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.i.se.nxv1i8.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_i_se_e8mf4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.i.se.nxv2i8.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.i.se.nxv2i8.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_i_se_e8mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.i.se.nxv4i8.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.i.se.nxv4i8.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_i_se_e8m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.i.se.nxv8i8.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.i.se.nxv8i8.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_i_se_e8m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.i.se.nxv16i8.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.i.se.nxv16i8.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_i_se_e8m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.i.se.nxv32i8.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.i.se.nxv32i8.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_i_se_e8m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.i.se.nxv64i8.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.i.se.nxv64i8.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_i_se_e16mf4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.i.se.nxv1i16.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.i.se.nxv1i16.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_i_se_e16mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.i.se.nxv2i16.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.i.se.nxv2i16.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_i_se_e16m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.i.se.nxv4i16.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.i.se.nxv4i16.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_i_se_e16m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.i.se.nxv8i16.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.i.se.nxv8i16.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_i_se_e16m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.i.se.nxv16i16.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.i.se.nxv16i16.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_i_se_e16m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.i.se.nxv32i16.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.i.se.nxv32i16.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_i_se_e32mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.i.se.nxv1i32.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.i.se.nxv1i32.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_i_se_e32m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.i.se.nxv2i32.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.i.se.nxv2i32.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_i_se_e32m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.i.se.nxv4i32.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.i.se.nxv4i32.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_i_se_e32m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.i.se.nxv8i32.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.i.se.nxv8i32.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_i_se_e32m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.i.se.nxv16i32.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.i.se.nxv16i32.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_i_se_e64m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.i.se.nxv1i64.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.i.se.nxv1i64.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_i_se_e64m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.i.se.nxv2i64.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.i.se.nxv2i64.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_i_se_e64m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.i.se.nxv4i64.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.i.se.nxv4i64.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_i_se_e64m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.i.se.nxv8i64.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.i.se.nxv8i64.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_i_e8mf8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.i.nxv1i8.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.i.nxv1i8.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_i_e8mf4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.i.nxv2i8.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.i.nxv2i8.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_i_e8mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.i.nxv4i8.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.i.nxv4i8.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_i_e8m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.i.nxv8i8.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.i.nxv8i8.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_i_e8m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.i.nxv16i8.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.i.nxv16i8.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_i_e8m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.i.nxv32i8.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.i.nxv32i8.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_i_e8m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.i.nxv64i8.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.i.nxv64i8.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_i_e16mf4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.i.nxv1i16.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.i.nxv1i16.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_i_e16mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.i.nxv2i16.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.i.nxv2i16.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_i_e16m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.i.nxv4i16.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.i.nxv4i16.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_i_e16m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.i.nxv8i16.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.i.nxv8i16.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_i_e16m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.i.nxv16i16.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.i.nxv16i16.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_i_e16m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.i.nxv32i16.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.i.nxv32i16.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_i_e32mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.i.nxv1i32.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.i.nxv1i32.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_i_e32m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.i.nxv2i32.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.i.nxv2i32.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_i_e32m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.i.nxv4i32.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.i.nxv4i32.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_i_e32m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.i.nxv8i32.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.i.nxv8i32.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_i_e32m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.i.nxv16i32.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.i.nxv16i32.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_i_e64m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.i.nxv1i64.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.i.nxv1i64.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_i_e64m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.i.nxv2i64.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.i.nxv2i64.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_i_e64m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.i.nxv4i64.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.i.nxv4i64.i32.i32.iXLen(i32, i32, i32, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_i_e64m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.i.nxv8i64.i32.i32.iXLen(i32 3, i32 31, i32 10, iXLen %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.i.nxv8i64.i32.i32.iXLen(i32, i32, i32, iXLen)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv-rv64.ll
new file mode 100644
index 0000000000000..8a5618f8f9553
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv-rv64.ll
@@ -0,0 +1,159 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN:  llc -mtriple=riscv64 -mattr=+v,+xsfvcp \
+; RUN:    -verify-machineinstrs < %s | FileCheck %s
+
+define void @test_sf_vc_xv_se_e64m1(<vscale x 1 x i64> %vs2, i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.i32.nxv1i64.i64.i64(i32 3, i32 31, <vscale x 1 x i64> %vs2, i64 %rs1, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.i32.nxv1i64.i64.i64(i32, i32, <vscale x 1 x i64>, i64, i64)
+
+define void @test_sf_vc_xv_se_e64m2(<vscale x 2 x i64> %vs2, i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.i32.nxv2i64.i64.i64(i32 3, i32 31, <vscale x 2 x i64> %vs2, i64 %rs1, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.i32.nxv2i64.i64.i64(i32, i32, <vscale x 2 x i64>, i64, i64)
+
+define void @test_sf_vc_xv_se_e64m4(<vscale x 4 x i64> %vs2, i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.i32.nxv4i64.i64.i64(i32 3, i32 31, <vscale x 4 x i64> %vs2, i64 %rs1, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.i32.nxv4i64.i64.i64(i32, i32, <vscale x 4 x i64>, i64, i64)
+
+define void @test_sf_vc_xv_se_e64m8(<vscale x 8 x i64> %vs2, i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.i32.nxv8i64.i64.i64(i32 3, i32 31, <vscale x 8 x i64> %vs2, i64 %rs1, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.i32.nxv8i64.i64.i64(i32, i32, <vscale x 8 x i64>, i64, i64)
+
+define <vscale x 1 x i64> @test_sf_vc_v_xv_se_e64m1(<vscale x 1 x i64> %vs2, i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv1i64.i32.i64.i64(i32 3, <vscale x 1 x i64> %vs2, i64 %rs1, i64 %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv1i64.i32.i64.i64(i32, <vscale x 1 x i64>, i64, i64)
+
+define <vscale x 2 x i64> @test_sf_vc_v_xv_se_e64m2(<vscale x 2 x i64> %vs2, i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv2i64.i32.i64.i64(i32 3, <vscale x 2 x i64> %vs2, i64 %rs1, i64 %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv2i64.i32.i64.i64(i32, <vscale x 2 x i64>, i64, i64)
+
+define <vscale x 4 x i64> @test_sf_vc_v_xv_se_e64m4(<vscale x 4 x i64> %vs2, i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv4i64.i32.i64.i64(i32 3, <vscale x 4 x i64> %vs2, i64 %rs1, i64 %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv4i64.i32.i64.i64(i32, <vscale x 4 x i64>, i64, i64)
+
+define <vscale x 8 x i64> @test_sf_vc_v_xv_se_e64m8(<vscale x 8 x i64> %vs2, i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv8i64.i32.i64.i64(i32 3, <vscale x 8 x i64> %vs2, i64 %rs1, i64 %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv8i64.i32.i64.i64(i32, <vscale x 8 x i64>, i64, i64)
+
+define <vscale x 1 x i64> @test_sf_vc_v_xv_e64m1(<vscale x 1 x i64> %vs2, i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xv.nxv1i64.i32.i64.i64(i32 3, <vscale x 1 x i64> %vs2, i64 %rs1, i64 %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xv.nxv1i64.i32.i64.i64(i32, <vscale x 1 x i64>, i64, i64)
+
+define <vscale x 2 x i64> @test_sf_vc_v_xv_e64m2(<vscale x 2 x i64> %vs2, i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xv.nxv2i64.i32.i64.i64(i32 3, <vscale x 2 x i64> %vs2, i64 %rs1, i64 %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xv.nxv2i64.i32.i64.i64(i32, <vscale x 2 x i64>, i64, i64)
+
+define <vscale x 4 x i64> @test_sf_vc_v_xv_e64m4(<vscale x 4 x i64> %vs2, i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xv.nxv4i64.i32.i64.i64(i32 3, <vscale x 4 x i64> %vs2, i64 %rs1, i64 %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xv.nxv4i64.i32.i64.i64(i32, <vscale x 4 x i64>, i64, i64)
+
+define <vscale x 8 x i64> @test_sf_vc_v_xv_e64m8(<vscale x 8 x i64> %vs2, i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xv.nxv8i64.i32.i64.i64(i32 3, <vscale x 8 x i64> %vs2, i64 %rs1, i64 %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xv.nxv8i64.i32.i64.i64(i32, <vscale x 8 x i64>, i64, i64)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll
new file mode 100644
index 0000000000000..d42a1a3bb3d45
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll
@@ -0,0 +1,3008 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN:  sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+xsfvcp \
+; RUN:    -verify-machineinstrs | FileCheck %s
+; RUN:  sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+xsfvcp \
+; RUN:    -verify-machineinstrs | FileCheck %s
+
+define void @test_sf_vc_vv_se_e8mf8(<vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.i32.nxv1i8.nxv1i8.iXLen(i32 3, i32 31, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.i32.nxv1i8.nxv1i8.iXLen(i32, i32, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define void @test_sf_vc_vv_se_e8mf4(<vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.i32.nxv2i8.nxv2i8.iXLen(i32 3, i32 31, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.i32.nxv2i8.nxv2i8.iXLen(i32, i32, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define void @test_sf_vc_vv_se_e8mf2(<vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.i32.nxv4i8.nxv4i8.iXLen(i32 3, i32 31, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.i32.nxv4i8.nxv4i8.iXLen(i32, i32, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define void @test_sf_vc_vv_se_e8m1(<vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.i32.nxv8i8.nxv8i8.iXLen(i32 3, i32 31, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.i32.nxv8i8.nxv8i8.iXLen(i32, i32, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define void @test_sf_vc_vv_se_e8m2(<vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.i32.nxv16i8.nxv16i8.iXLen(i32 3, i32 31, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.i32.nxv16i8.nxv16i8.iXLen(i32, i32, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define void @test_sf_vc_vv_se_e8m4(<vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.i32.nxv32i8.nxv32i8.iXLen(i32 3, i32 31, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.i32.nxv32i8.nxv32i8.iXLen(i32, i32, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define void @test_sf_vc_vv_se_e8m8(<vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.i32.nxv64i8.nxv64i8.iXLen(i32 3, i32 31, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.i32.nxv64i8.nxv64i8.iXLen(i32, i32, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
+
+define void @test_sf_vc_vv_se_e16mf4(<vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.i32.nxv1i16.nxv1i16.iXLen(i32 3, i32 31, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.i32.nxv1i16.nxv1i16.iXLen(i32, i32, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define void @test_sf_vc_vv_se_e16mf2(<vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.i32.nxv2i16.nxv2i16.iXLen(i32 3, i32 31, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.i32.nxv2i16.nxv2i16.iXLen(i32, i32, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define void @test_sf_vc_vv_se_e16m1(<vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.i32.nxv4i16.nxv4i16.iXLen(i32 3, i32 31, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.i32.nxv4i16.nxv4i16.iXLen(i32, i32, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define void @test_sf_vc_vv_se_e16m2(<vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.i32.nxv8i16.nxv8i16.iXLen(i32 3, i32 31, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.i32.nxv8i16.nxv8i16.iXLen(i32, i32, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define void @test_sf_vc_vv_se_e16m4(<vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.i32.nxv16i16.nxv16i16.iXLen(i32 3, i32 31, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.i32.nxv16i16.nxv16i16.iXLen(i32, i32, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define void @test_sf_vc_vv_se_e16m8(<vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.i32.nxv32i16.nxv32i16.iXLen(i32 3, i32 31, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.i32.nxv32i16.nxv32i16.iXLen(i32, i32, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
+
+define void @test_sf_vc_vv_se_e32mf2(<vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.i32.nxv1i32.nxv1i32.iXLen(i32 3, i32 31, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.i32.nxv1i32.nxv1i32.iXLen(i32, i32, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
+
+define void @test_sf_vc_vv_se_e32m1(<vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.i32.nxv2i32.nxv2i32.iXLen(i32 3, i32 31, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.i32.nxv2i32.nxv2i32.iXLen(i32, i32, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
+
+define void @test_sf_vc_vv_se_e32m2(<vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.i32.nxv4i32.nxv4i32.iXLen(i32 3, i32 31, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.i32.nxv4i32.nxv4i32.iXLen(i32, i32, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
+
+define void @test_sf_vc_vv_se_e32m4(<vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.i32.nxv8i32.nxv8i32.iXLen(i32 3, i32 31, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.i32.nxv8i32.nxv8i32.iXLen(i32, i32, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
+
+define void @test_sf_vc_vv_se_e32m8(<vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.i32.nxv16i32.nxv16i32.iXLen(i32 3, i32 31, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.i32.nxv16i32.nxv16i32.iXLen(i32, i32, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
+
+define void @test_sf_vc_vv_se_e64m1(<vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.i32.nxv1i64.nxv1i64.iXLen(i32 3, i32 31, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.i32.nxv1i64.nxv1i64.iXLen(i32, i32, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
+
+define void @test_sf_vc_vv_se_e64m2(<vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.i32.nxv2i64.nxv2i64.iXLen(i32 3, i32 31, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.i32.nxv2i64.nxv2i64.iXLen(i32, i32, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
+
+define void @test_sf_vc_vv_se_e64m4(<vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.i32.nxv4i64.nxv4i64.iXLen(i32 3, i32 31, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.i32.nxv4i64.nxv4i64.iXLen(i32, i32, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
+
+define void @test_sf_vc_vv_se_e64m8(<vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.i32.nxv8i64.nxv8i64.iXLen(i32 3, i32 31, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.i32.nxv8i64.nxv8i64.iXLen(i32, i32, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_vv_se_e8mf8(<vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv1i8.i32.nxv1i8.iXLen(i32 3, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
+  ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv1i8.i32.nxv1i8.iXLen(i32, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_vv_se_e8mf4(<vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv2i8.i32.nxv2i8.iXLen(i32 3, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
+  ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv2i8.i32.nxv2i8.iXLen(i32, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_vv_se_e8mf2(<vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv4i8.i32.nxv4i8.iXLen(i32 3, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
+  ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv4i8.i32.nxv4i8.iXLen(i32, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_vv_se_e8m1(<vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv8i8.i32.nxv8i8.iXLen(i32 3, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
+  ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv8i8.i32.nxv8i8.iXLen(i32, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_vv_se_e8m2(<vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv16i8.i32.nxv16i8.iXLen(i32 3, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
+  ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv16i8.i32.nxv16i8.iXLen(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_vv_se_e8m4(<vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv32i8.i32.nxv32i8.iXLen(i32 3, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
+  ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv32i8.i32.nxv32i8.iXLen(i32, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_vv_se_e8m8(<vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv64i8.i32.nxv64i8.iXLen(i32 3, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl)
+  ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv64i8.i32.nxv64i8.iXLen(i32, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_vv_se_e16mf4(<vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv1i16.i32.nxv1i16.iXLen(i32 3, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv1i16.i32.nxv1i16.iXLen(i32, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_vv_se_e16mf2(<vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv2i16.i32.nxv2i16.iXLen(i32 3, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv2i16.i32.nxv2i16.iXLen(i32, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_vv_se_e16m1(<vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv4i16.i32.nxv4i16.iXLen(i32 3, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv4i16.i32.nxv4i16.iXLen(i32, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_vv_se_e16m2(<vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv8i16.i32.nxv8i16.iXLen(i32 3, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv8i16.i32.nxv8i16.iXLen(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_vv_se_e16m4(<vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv16i16.i32.nxv16i16.iXLen(i32 3, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv16i16.i32.nxv16i16.iXLen(i32, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_vv_se_e16m8(<vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv32i16.i32.nxv32i16.iXLen(i32 3, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv32i16.i32.nxv32i16.iXLen(i32, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_vv_se_e32mf2(<vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv1i32.i32.nxv1i32.iXLen(i32 3, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv1i32.i32.nxv1i32.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_vv_se_e32m1(<vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv2i32.i32.nxv2i32.iXLen(i32 3, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv2i32.i32.nxv2i32.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_vv_se_e32m2(<vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv4i32.i32.nxv4i32.iXLen(i32 3, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv4i32.i32.nxv4i32.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_vv_se_e32m4(<vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv8i32.i32.nxv8i32.iXLen(i32 3, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv8i32.i32.nxv8i32.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_vv_se_e32m8(<vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv16i32.i32.nxv16i32.iXLen(i32 3, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv16i32.i32.nxv16i32.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_vv_se_e64m1(<vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv1i64.i32.nxv1i64.iXLen(i32 3, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv1i64.i32.nxv1i64.iXLen(i32, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_vv_se_e64m2(<vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv2i64.i32.nxv2i64.iXLen(i32 3, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv2i64.i32.nxv2i64.iXLen(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_vv_se_e64m4(<vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv4i64.i32.nxv4i64.iXLen(i32 3, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv4i64.i32.nxv4i64.iXLen(i32, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_vv_se_e64m8(<vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv8i64.i32.nxv8i64.iXLen(i32 3, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv8i64.i32.nxv8i64.iXLen(i32, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_vv_e8mf8(<vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.nxv1i8.i32.nxv1i8.iXLen(i32 3, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
+  ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.nxv1i8.i32.nxv1i8.iXLen(i32, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_vv_e8mf4(<vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vv.nxv2i8.i32.nxv2i8.iXLen(i32 3, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
+  ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vv.nxv2i8.i32.nxv2i8.iXLen(i32, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_vv_e8mf2(<vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vv.nxv4i8.i32.nxv4i8.iXLen(i32 3, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
+  ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vv.nxv4i8.i32.nxv4i8.iXLen(i32, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_vv_e8m1(<vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vv.nxv8i8.i32.nxv8i8.iXLen(i32 3, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
+  ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vv.nxv8i8.i32.nxv8i8.iXLen(i32, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_vv_e8m2(<vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vv.nxv16i8.i32.nxv16i8.iXLen(i32 3, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
+  ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vv.nxv16i8.i32.nxv16i8.iXLen(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_vv_e8m4(<vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vv.nxv32i8.i32.nxv32i8.iXLen(i32 3, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
+  ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vv.nxv32i8.i32.nxv32i8.iXLen(i32, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_vv_e8m8(<vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vv.nxv64i8.i32.nxv64i8.iXLen(i32 3, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl)
+  ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vv.nxv64i8.i32.nxv64i8.iXLen(i32, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_vv_e16mf4(<vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vv.nxv1i16.i32.nxv1i16.iXLen(i32 3, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vv.nxv1i16.i32.nxv1i16.iXLen(i32, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_vv_e16mf2(<vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vv.nxv2i16.i32.nxv2i16.iXLen(i32 3, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vv.nxv2i16.i32.nxv2i16.iXLen(i32, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_vv_e16m1(<vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vv.nxv4i16.i32.nxv4i16.iXLen(i32 3, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vv.nxv4i16.i32.nxv4i16.iXLen(i32, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_vv_e16m2(<vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vv.nxv8i16.i32.nxv8i16.iXLen(i32 3, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vv.nxv8i16.i32.nxv8i16.iXLen(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_vv_e16m4(<vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vv.nxv16i16.i32.nxv16i16.iXLen(i32 3, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vv.nxv16i16.i32.nxv16i16.iXLen(i32, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_vv_e16m8(<vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vv.nxv32i16.i32.nxv32i16.iXLen(i32 3, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vv.nxv32i16.i32.nxv32i16.iXLen(i32, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_vv_e32mf2(<vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vv.nxv1i32.i32.nxv1i32.iXLen(i32 3, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vv.nxv1i32.i32.nxv1i32.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_vv_e32m1(<vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vv.nxv2i32.i32.nxv2i32.iXLen(i32 3, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vv.nxv2i32.i32.nxv2i32.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_vv_e32m2(<vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.nxv4i32.i32.nxv4i32.iXLen(i32 3, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.nxv4i32.i32.nxv4i32.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_vv_e32m4(<vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vv.nxv8i32.i32.nxv8i32.iXLen(i32 3, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vv.nxv8i32.i32.nxv8i32.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_vv_e32m8(<vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vv.nxv16i32.i32.nxv16i32.iXLen(i32 3, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vv.nxv16i32.i32.nxv16i32.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_vv_e64m1(<vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vv.nxv1i64.i32.nxv1i64.iXLen(i32 3, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vv.nxv1i64.i32.nxv1i64.iXLen(i32, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_vv_e64m2(<vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vv.nxv2i64.i32.nxv2i64.iXLen(i32 3, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vv.nxv2i64.i32.nxv2i64.iXLen(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_vv_e64m4(<vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vv.nxv4i64.i32.nxv4i64.iXLen(i32 3, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vv.nxv4i64.i32.nxv4i64.iXLen(i32, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_vv_e64m8(<vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.nxv8i64.i32.nxv8i64.iXLen(i32 3, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.nxv8i64.i32.nxv8i64.iXLen(i32, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
+
+define void @test_sf_vc_xv_se_e8mf8(<vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.i32.nxv1i8.i8.iXLen(i32 3, i32 31, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.i32.nxv1i8.i8.iXLen(i32, i32, <vscale x 1 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e8mf4(<vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.i32.nxv2i8.i8.iXLen(i32 3, i32 31, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.i32.nxv2i8.i8.iXLen(i32, i32, <vscale x 2 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e8mf2(<vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.i32.nxv4i8.i8.iXLen(i32 3, i32 31, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.i32.nxv4i8.i8.iXLen(i32, i32, <vscale x 4 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e8m1(<vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.i32.nxv8i8.i8.iXLen(i32 3, i32 31, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.i32.nxv8i8.i8.iXLen(i32, i32, <vscale x 8 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e8m2(<vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.i32.nxv16i8.i8.iXLen(i32 3, i32 31, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.i32.nxv16i8.i8.iXLen(i32, i32, <vscale x 16 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e8m4(<vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.i32.nxv32i8.i8.iXLen(i32 3, i32 31, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.i32.nxv32i8.i8.iXLen(i32, i32, <vscale x 32 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e8m8(<vscale x 64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.i32.nxv64i8.i8.iXLen(i32 3, i32 31, <vscale x 64 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.i32.nxv64i8.i8.iXLen(i32, i32, <vscale x 64 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e16mf4(<vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.i32.nxv1i16.i16.iXLen(i32 3, i32 31, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.i32.nxv1i16.i16.iXLen(i32, i32, <vscale x 1 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xv_se_e16mf2(<vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.i32.nxv2i16.i16.iXLen(i32 3, i32 31, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.i32.nxv2i16.i16.iXLen(i32, i32, <vscale x 2 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xv_se_e16m1(<vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.i32.nxv4i16.i16.iXLen(i32 3, i32 31, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.i32.nxv4i16.i16.iXLen(i32, i32, <vscale x 4 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xv_se_e16m2(<vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.i32.nxv8i16.i16.iXLen(i32 3, i32 31, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.i32.nxv8i16.i16.iXLen(i32, i32, <vscale x 8 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xv_se_e16m4(<vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.i32.nxv16i16.i16.iXLen(i32 3, i32 31, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.i32.nxv16i16.i16.iXLen(i32, i32, <vscale x 16 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xv_se_e16m8(<vscale x 32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.i32.nxv32i16.i16.iXLen(i32 3, i32 31, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.i32.nxv32i16.i16.iXLen(i32, i32, <vscale x 32 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xv_se_e32mf2(<vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.i32.nxv1i32.i32.iXLen(i32 3, i32 31, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.i32.nxv1i32.i32.iXLen(i32, i32, <vscale x 1 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xv_se_e32m1(<vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.i32.nxv2i32.i32.iXLen(i32 3, i32 31, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.i32.nxv2i32.i32.iXLen(i32, i32, <vscale x 2 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xv_se_e32m2(<vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.i32.nxv4i32.i32.iXLen(i32 3, i32 31, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.i32.nxv4i32.i32.iXLen(i32, i32, <vscale x 4 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xv_se_e32m4(<vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.i32.nxv8i32.i32.iXLen(i32 3, i32 31, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.i32.nxv8i32.i32.iXLen(i32, i32, <vscale x 8 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xv_se_e32m8(<vscale x 16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.i32.nxv16i32.i32.iXLen(i32 3, i32 31, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.i32.nxv16i32.i32.iXLen(i32, i32, <vscale x 16 x i32>, i32, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_xv_se_e8mf8(<vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv1i8.i32.i8.iXLen(i32 3, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv1i8.i32.i8.iXLen(i32, <vscale x 1 x i8>, i8, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_xv_se_e8mf4(<vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv2i8.i32.i8.iXLen(i32 3, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv2i8.i32.i8.iXLen(i32, <vscale x 2 x i8>, i8, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_xv_se_e8mf2(<vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv4i8.i32.i8.iXLen(i32 3, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv4i8.i32.i8.iXLen(i32, <vscale x 4 x i8>, i8, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_xv_se_e8m1(<vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv8i8.i32.i8.iXLen(i32 3, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv8i8.i32.i8.iXLen(i32, <vscale x 8 x i8>, i8, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_xv_se_e8m2(<vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv16i8.i32.i8.iXLen(i32 3, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv16i8.i32.i8.iXLen(i32, <vscale x 16 x i8>, i8, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_xv_se_e8m4(<vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv32i8.i32.i8.iXLen(i32 3, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv32i8.i32.i8.iXLen(i32, <vscale x 32 x i8>, i8, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_xv_se_e8m8(<vscale x 64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv64i8.i32.i8.iXLen(i32 3, <vscale x 64 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv64i8.i32.i8.iXLen(i32, <vscale x 64 x i8>, i8, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_xv_se_e16mf4(<vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv1i16.i32.i16.iXLen(i32 3, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv1i16.i32.i16.iXLen(i32, <vscale x 1 x i16>, i16, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_xv_se_e16mf2(<vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv2i16.i32.i16.iXLen(i32 3, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv2i16.i32.i16.iXLen(i32, <vscale x 2 x i16>, i16, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_xv_se_e16m1(<vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv4i16.i32.i16.iXLen(i32 3, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv4i16.i32.i16.iXLen(i32, <vscale x 4 x i16>, i16, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_xv_se_e16m2(<vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv8i16.i32.i16.iXLen(i32 3, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv8i16.i32.i16.iXLen(i32, <vscale x 8 x i16>, i16, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_xv_se_e16m4(<vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv16i16.i32.i16.iXLen(i32 3, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv16i16.i32.i16.iXLen(i32, <vscale x 16 x i16>, i16, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_xv_se_e16m8(<vscale x 32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv32i16.i32.i16.iXLen(i32 3, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv32i16.i32.i16.iXLen(i32, <vscale x 32 x i16>, i16, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_xv_se_e32mf2(<vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv1i32.i32.i32.iXLen(i32 3, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv1i32.i32.i32.iXLen(i32, <vscale x 1 x i32>, i32, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_xv_se_e32m1(<vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv2i32.i32.i32.iXLen(i32 3, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv2i32.i32.i32.iXLen(i32, <vscale x 2 x i32>, i32, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_xv_se_e32m2(<vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i32.i32.iXLen(i32 3, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i32.i32.iXLen(i32, <vscale x 4 x i32>, i32, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_xv_se_e32m4(<vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv8i32.i32.i32.iXLen(i32 3, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv8i32.i32.i32.iXLen(i32, <vscale x 8 x i32>, i32, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_xv_se_e32m8(<vscale x 16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv16i32.i32.i32.iXLen(i32 3, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv16i32.i32.i32.iXLen(i32, <vscale x 16 x i32>, i32, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_xv_e8mf8(<vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xv.nxv1i8.i32.i8.iXLen(i32 3, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xv.nxv1i8.i32.i8.iXLen(i32, <vscale x 1 x i8>, i8, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_xv_e8mf4(<vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xv.nxv2i8.i32.i8.iXLen(i32 3, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xv.nxv2i8.i32.i8.iXLen(i32, <vscale x 2 x i8>, i8, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_xv_e8mf2(<vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xv.nxv4i8.i32.i8.iXLen(i32 3, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xv.nxv4i8.i32.i8.iXLen(i32, <vscale x 4 x i8>, i8, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_xv_e8m1(<vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xv.nxv8i8.i32.i8.iXLen(i32 3, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xv.nxv8i8.i32.i8.iXLen(i32, <vscale x 8 x i8>, i8, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_xv_e8m2(<vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xv.nxv16i8.i32.i8.iXLen(i32 3, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xv.nxv16i8.i32.i8.iXLen(i32, <vscale x 16 x i8>, i8, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_xv_e8m4(<vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xv.nxv32i8.i32.i8.iXLen(i32 3, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xv.nxv32i8.i32.i8.iXLen(i32, <vscale x 32 x i8>, i8, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_xv_e8m8(<vscale x 64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xv.nxv64i8.i32.i8.iXLen(i32 3, <vscale x 64 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xv.nxv64i8.i32.i8.iXLen(i32, <vscale x 64 x i8>, i8, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_xv_e16mf4(<vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xv.nxv1i16.i32.i16.iXLen(i32 3, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xv.nxv1i16.i32.i16.iXLen(i32, <vscale x 1 x i16>, i16, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_xv_e16mf2(<vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xv.nxv2i16.i32.i16.iXLen(i32 3, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xv.nxv2i16.i32.i16.iXLen(i32, <vscale x 2 x i16>, i16, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_xv_e16m1(<vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xv.nxv4i16.i32.i16.iXLen(i32 3, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xv.nxv4i16.i32.i16.iXLen(i32, <vscale x 4 x i16>, i16, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_xv_e16m2(<vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xv.nxv8i16.i32.i16.iXLen(i32 3, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xv.nxv8i16.i32.i16.iXLen(i32, <vscale x 8 x i16>, i16, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_xv_e16m4(<vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xv.nxv16i16.i32.i16.iXLen(i32 3, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xv.nxv16i16.i32.i16.iXLen(i32, <vscale x 16 x i16>, i16, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_xv_e16m8(<vscale x 32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xv.nxv32i16.i32.i16.iXLen(i32 3, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xv.nxv32i16.i32.i16.iXLen(i32, <vscale x 32 x i16>, i16, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_xv_e32mf2(<vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xv.nxv1i32.i32.i32.iXLen(i32 3, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xv.nxv1i32.i32.i32.iXLen(i32, <vscale x 1 x i32>, i32, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_xv_e32m1(<vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xv.nxv2i32.i32.i32.iXLen(i32 3, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xv.nxv2i32.i32.i32.iXLen(i32, <vscale x 2 x i32>, i32, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_xv_e32m2(<vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.nxv4i32.i32.i32.iXLen(i32 3, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.nxv4i32.i32.i32.iXLen(i32, <vscale x 4 x i32>, i32, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_xv_e32m4(<vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xv.nxv8i32.i32.i32.iXLen(i32 3, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xv.nxv8i32.i32.i32.iXLen(i32, <vscale x 8 x i32>, i32, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_xv_e32m8(<vscale x 16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xv.nxv16i32.i32.i32.iXLen(i32 3, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xv.nxv16i32.i32.i32.iXLen(i32, <vscale x 16 x i32>, i32, iXLen)
+
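+; Tests for the void-returning llvm.riscv.sf.vc.iv.se intrinsics (immediate scalar operand),
+; covering every SEW/LMUL combination from e8mf8 through e64m8.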
+define void @test_sf_vc_iv_se_e8mf8(<vscale x 1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.i32.nxv1i8.i32.iXLen(i32 3, i32 31, <vscale x 1 x i8> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.i32.nxv1i8.i32.iXLen(i32, i32, <vscale x 1 x i8>, i32, iXLen)
+
+define void @test_sf_vc_iv_se_e8mf4(<vscale x 2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.i32.nxv2i8.i32.iXLen(i32 3, i32 31, <vscale x 2 x i8> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.i32.nxv2i8.i32.iXLen(i32, i32, <vscale x 2 x i8>, i32, iXLen)
+
+define void @test_sf_vc_iv_se_e8mf2(<vscale x 4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.i32.nxv4i8.i32.iXLen(i32 3, i32 31, <vscale x 4 x i8> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.i32.nxv4i8.i32.iXLen(i32, i32, <vscale x 4 x i8>, i32, iXLen)
+
+define void @test_sf_vc_iv_se_e8m1(<vscale x 8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.i32.nxv8i8.i32.iXLen(i32 3, i32 31, <vscale x 8 x i8> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.i32.nxv8i8.i32.iXLen(i32, i32, <vscale x 8 x i8>, i32, iXLen)
+
+define void @test_sf_vc_iv_se_e8m2(<vscale x 16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.i32.nxv16i8.i32.iXLen(i32 3, i32 31, <vscale x 16 x i8> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.i32.nxv16i8.i32.iXLen(i32, i32, <vscale x 16 x i8>, i32, iXLen)
+
+define void @test_sf_vc_iv_se_e8m4(<vscale x 32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.i32.nxv32i8.i32.iXLen(i32 3, i32 31, <vscale x 32 x i8> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.i32.nxv32i8.i32.iXLen(i32, i32, <vscale x 32 x i8>, i32, iXLen)
+
+define void @test_sf_vc_iv_se_e8m8(<vscale x 64 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.i32.nxv64i8.i32.iXLen(i32 3, i32 31, <vscale x 64 x i8> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.i32.nxv64i8.i32.iXLen(i32, i32, <vscale x 64 x i8>, i32, iXLen)
+
+define void @test_sf_vc_iv_se_e16mf4(<vscale x 1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.i32.nxv1i16.i32.iXLen(i32 3, i32 31, <vscale x 1 x i16> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.i32.nxv1i16.i32.iXLen(i32, i32, <vscale x 1 x i16>, i32, iXLen)
+
+define void @test_sf_vc_iv_se_e16mf2(<vscale x 2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.i32.nxv2i16.i32.iXLen(i32 3, i32 31, <vscale x 2 x i16> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.i32.nxv2i16.i32.iXLen(i32, i32, <vscale x 2 x i16>, i32, iXLen)
+
+define void @test_sf_vc_iv_se_e16m1(<vscale x 4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.i32.nxv4i16.i32.iXLen(i32 3, i32 31, <vscale x 4 x i16> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.i32.nxv4i16.i32.iXLen(i32, i32, <vscale x 4 x i16>, i32, iXLen)
+
+define void @test_sf_vc_iv_se_e16m2(<vscale x 8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.i32.nxv8i16.i32.iXLen(i32 3, i32 31, <vscale x 8 x i16> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.i32.nxv8i16.i32.iXLen(i32, i32, <vscale x 8 x i16>, i32, iXLen)
+
+define void @test_sf_vc_iv_se_e16m4(<vscale x 16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.i32.nxv16i16.i32.iXLen(i32 3, i32 31, <vscale x 16 x i16> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.i32.nxv16i16.i32.iXLen(i32, i32, <vscale x 16 x i16>, i32, iXLen)
+
+define void @test_sf_vc_iv_se_e16m8(<vscale x 32 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.i32.nxv32i16.i32.iXLen(i32 3, i32 31, <vscale x 32 x i16> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.i32.nxv32i16.i32.iXLen(i32, i32, <vscale x 32 x i16>, i32, iXLen)
+
+define void @test_sf_vc_iv_se_e32mf2(<vscale x 1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.i32.nxv1i32.i32.iXLen(i32 3, i32 31, <vscale x 1 x i32> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.i32.nxv1i32.i32.iXLen(i32, i32, <vscale x 1 x i32>, i32, iXLen)
+
+define void @test_sf_vc_iv_se_e32m1(<vscale x 2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.i32.nxv2i32.i32.iXLen(i32 3, i32 31, <vscale x 2 x i32> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.i32.nxv2i32.i32.iXLen(i32, i32, <vscale x 2 x i32>, i32, iXLen)
+
+define void @test_sf_vc_iv_se_e32m2(<vscale x 4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.i32.nxv4i32.i32.iXLen(i32 3, i32 31, <vscale x 4 x i32> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.i32.nxv4i32.i32.iXLen(i32, i32, <vscale x 4 x i32>, i32, iXLen)
+
+define void @test_sf_vc_iv_se_e32m4(<vscale x 8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.i32.nxv8i32.i32.iXLen(i32 3, i32 31, <vscale x 8 x i32> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.i32.nxv8i32.i32.iXLen(i32, i32, <vscale x 8 x i32>, i32, iXLen)
+
+define void @test_sf_vc_iv_se_e32m8(<vscale x 16 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.i32.nxv16i32.i32.iXLen(i32 3, i32 31, <vscale x 16 x i32> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.i32.nxv16i32.i32.iXLen(i32, i32, <vscale x 16 x i32>, i32, iXLen)
+
+define void @test_sf_vc_iv_se_e64m1(<vscale x 1 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.i32.nxv1i64.i32.iXLen(i32 3, i32 31, <vscale x 1 x i64> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.i32.nxv1i64.i32.iXLen(i32, i32, <vscale x 1 x i64>, i32, iXLen)
+
+define void @test_sf_vc_iv_se_e64m2(<vscale x 2 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.i32.nxv2i64.i32.iXLen(i32 3, i32 31, <vscale x 2 x i64> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.i32.nxv2i64.i32.iXLen(i32, i32, <vscale x 2 x i64>, i32, iXLen)
+
+define void @test_sf_vc_iv_se_e64m4(<vscale x 4 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.i32.nxv4i64.i32.iXLen(i32 3, i32 31, <vscale x 4 x i64> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.i32.nxv4i64.i32.iXLen(i32, i32, <vscale x 4 x i64>, i32, iXLen)
+
+define void @test_sf_vc_iv_se_e64m8(<vscale x 8 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.i32.nxv8i64.i32.iXLen(i32 3, i32 31, <vscale x 8 x i64> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.i32.nxv8i64.i32.iXLen(i32, i32, <vscale x 8 x i64>, i32, iXLen)
+
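+; Tests for the llvm.riscv.sf.vc.v.iv.se intrinsics, which return a result vector, over the same
+; e8mf8 through e64m8 range.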
+define <vscale x 1 x i8> @test_sf_vc_v_iv_se_e8mf8(<vscale x 1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv1i8.i32.i32.iXLen(i32 3, <vscale x 1 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv1i8.i32.i32.iXLen(i32, <vscale x 1 x i8>, i32, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_iv_se_e8mf4(<vscale x 2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv2i8.i32.i32.iXLen(i32 3, <vscale x 2 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv2i8.i32.i32.iXLen(i32, <vscale x 2 x i8>, i32, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_iv_se_e8mf2(<vscale x 4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv4i8.i32.i32.iXLen(i32 3, <vscale x 4 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv4i8.i32.i32.iXLen(i32, <vscale x 4 x i8>, i32, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_iv_se_e8m1(<vscale x 8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv8i8.i32.i32.iXLen(i32 3, <vscale x 8 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv8i8.i32.i32.iXLen(i32, <vscale x 8 x i8>, i32, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_iv_se_e8m2(<vscale x 16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv16i8.i32.i32.iXLen(i32 3, <vscale x 16 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv16i8.i32.i32.iXLen(i32, <vscale x 16 x i8>, i32, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_iv_se_e8m4(<vscale x 32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv32i8.i32.i32.iXLen(i32 3, <vscale x 32 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv32i8.i32.i32.iXLen(i32, <vscale x 32 x i8>, i32, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_iv_se_e8m8(<vscale x 64 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv64i8.i32.i32.iXLen(i32 3, <vscale x 64 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv64i8.i32.i32.iXLen(i32, <vscale x 64 x i8>, i32, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_iv_se_e16mf4(<vscale x 1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv1i16.i32.i32.iXLen(i32 3, <vscale x 1 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv1i16.i32.i32.iXLen(i32, <vscale x 1 x i16>, i32, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_iv_se_e16mf2(<vscale x 2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv2i16.i32.i32.iXLen(i32 3, <vscale x 2 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv2i16.i32.i32.iXLen(i32, <vscale x 2 x i16>, i32, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_iv_se_e16m1(<vscale x 4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv4i16.i32.i32.iXLen(i32 3, <vscale x 4 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv4i16.i32.i32.iXLen(i32, <vscale x 4 x i16>, i32, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_iv_se_e16m2(<vscale x 8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv8i16.i32.i32.iXLen(i32 3, <vscale x 8 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv8i16.i32.i32.iXLen(i32, <vscale x 8 x i16>, i32, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_iv_se_e16m4(<vscale x 16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv16i16.i32.i32.iXLen(i32 3, <vscale x 16 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv16i16.i32.i32.iXLen(i32, <vscale x 16 x i16>, i32, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_iv_se_e16m8(<vscale x 32 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv32i16.i32.i32.iXLen(i32 3, <vscale x 32 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv32i16.i32.i32.iXLen(i32, <vscale x 32 x i16>, i32, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_iv_se_e32mf2(<vscale x 1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv1i32.i32.i32.iXLen(i32 3, <vscale x 1 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv1i32.i32.i32.iXLen(i32, <vscale x 1 x i32>, i32, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_iv_se_e32m1(<vscale x 2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv2i32.i32.i32.iXLen(i32 3, <vscale x 2 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv2i32.i32.i32.iXLen(i32, <vscale x 2 x i32>, i32, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_iv_se_e32m2(<vscale x 4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv4i32.i32.i32.iXLen(i32 3, <vscale x 4 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv4i32.i32.i32.iXLen(i32, <vscale x 4 x i32>, i32, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_iv_se_e32m4(<vscale x 8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv8i32.i32.i32.iXLen(i32 3, <vscale x 8 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv8i32.i32.i32.iXLen(i32, <vscale x 8 x i32>, i32, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_iv_se_e32m8(<vscale x 16 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv16i32.i32.i32.iXLen(i32 3, <vscale x 16 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv16i32.i32.i32.iXLen(i32, <vscale x 16 x i32>, i32, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_iv_se_e64m1(<vscale x 1 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv1i64.i32.i32.iXLen(i32 3, <vscale x 1 x i64> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv1i64.i32.i32.iXLen(i32, <vscale x 1 x i64>, i32, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_iv_se_e64m2(<vscale x 2 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv2i64.i32.i32.iXLen(i32 3, <vscale x 2 x i64> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv2i64.i32.i32.iXLen(i32, <vscale x 2 x i64>, i32, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_iv_se_e64m4(<vscale x 4 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv4i64.i32.i32.iXLen(i32 3, <vscale x 4 x i64> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv4i64.i32.i32.iXLen(i32, <vscale x 4 x i64>, i32, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_iv_se_e64m8(<vscale x 8 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv8i64.i32.i32.iXLen(i32 3, <vscale x 8 x i64> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv8i64.i32.i32.iXLen(i32, <vscale x 8 x i64>, i32, iXLen)
+
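+; Tests for the llvm.riscv.sf.vc.v.iv intrinsics (the variant without the .se suffix).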
+define <vscale x 1 x i8> @test_sf_vc_v_iv_e8mf8(<vscale x 1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.iv.nxv1i8.i32.i32.iXLen(i32 3, <vscale x 1 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.iv.nxv1i8.i32.i32.iXLen(i32, <vscale x 1 x i8>, i32, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_iv_e8mf4(<vscale x 2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.iv.nxv2i8.i32.i32.iXLen(i32 3, <vscale x 2 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.iv.nxv2i8.i32.i32.iXLen(i32, <vscale x 2 x i8>, i32, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_iv_e8mf2(<vscale x 4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.iv.nxv4i8.i32.i32.iXLen(i32 3, <vscale x 4 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.iv.nxv4i8.i32.i32.iXLen(i32, <vscale x 4 x i8>, i32, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_iv_e8m1(<vscale x 8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.iv.nxv8i8.i32.i32.iXLen(i32 3, <vscale x 8 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.iv.nxv8i8.i32.i32.iXLen(i32, <vscale x 8 x i8>, i32, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_iv_e8m2(<vscale x 16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.iv.nxv16i8.i32.i32.iXLen(i32 3, <vscale x 16 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.iv.nxv16i8.i32.i32.iXLen(i32, <vscale x 16 x i8>, i32, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_iv_e8m4(<vscale x 32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.iv.nxv32i8.i32.i32.iXLen(i32 3, <vscale x 32 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.iv.nxv32i8.i32.i32.iXLen(i32, <vscale x 32 x i8>, i32, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_iv_e8m8(<vscale x 64 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.iv.nxv64i8.i32.i32.iXLen(i32 3, <vscale x 64 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.iv.nxv64i8.i32.i32.iXLen(i32, <vscale x 64 x i8>, i32, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_iv_e16mf4(<vscale x 1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.iv.nxv1i16.i32.i32.iXLen(i32 3, <vscale x 1 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.iv.nxv1i16.i32.i32.iXLen(i32, <vscale x 1 x i16>, i32, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_iv_e16mf2(<vscale x 2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.iv.nxv2i16.i32.i32.iXLen(i32 3, <vscale x 2 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.iv.nxv2i16.i32.i32.iXLen(i32, <vscale x 2 x i16>, i32, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_iv_e16m1(<vscale x 4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.iv.nxv4i16.i32.i32.iXLen(i32 3, <vscale x 4 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.iv.nxv4i16.i32.i32.iXLen(i32, <vscale x 4 x i16>, i32, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_iv_e16m2(<vscale x 8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.iv.nxv8i16.i32.i32.iXLen(i32 3, <vscale x 8 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.iv.nxv8i16.i32.i32.iXLen(i32, <vscale x 8 x i16>, i32, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_iv_e16m4(<vscale x 16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.iv.nxv16i16.i32.i32.iXLen(i32 3, <vscale x 16 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.iv.nxv16i16.i32.i32.iXLen(i32, <vscale x 16 x i16>, i32, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_iv_e16m8(<vscale x 32 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.iv.nxv32i16.i32.i32.iXLen(i32 3, <vscale x 32 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.iv.nxv32i16.i32.i32.iXLen(i32, <vscale x 32 x i16>, i32, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_iv_e32mf2(<vscale x 1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.iv.nxv1i32.i32.i32.iXLen(i32 3, <vscale x 1 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.iv.nxv1i32.i32.i32.iXLen(i32, <vscale x 1 x i32>, i32, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_iv_e32m1(<vscale x 2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.iv.nxv2i32.i32.i32.iXLen(i32 3, <vscale x 2 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.iv.nxv2i32.i32.i32.iXLen(i32, <vscale x 2 x i32>, i32, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_iv_e32m2(<vscale x 4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.nxv4i32.i32.i32.iXLen(i32 3, <vscale x 4 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.nxv4i32.i32.i32.iXLen(i32, <vscale x 4 x i32>, i32, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_iv_e32m4(<vscale x 8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.iv.nxv8i32.i32.i32.iXLen(i32 3, <vscale x 8 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.iv.nxv8i32.i32.i32.iXLen(i32, <vscale x 8 x i32>, i32, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_iv_e32m8(<vscale x 16 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.iv.nxv16i32.i32.i32.iXLen(i32 3, <vscale x 16 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.iv.nxv16i32.i32.i32.iXLen(i32, <vscale x 16 x i32>, i32, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_iv_e64m1(<vscale x 1 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.iv.nxv1i64.i32.i32.iXLen(i32 3, <vscale x 1 x i64> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.iv.nxv1i64.i32.i32.iXLen(i32, <vscale x 1 x i64>, i32, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_iv_e64m2(<vscale x 2 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.iv.nxv2i64.i32.i32.iXLen(i32 3, <vscale x 2 x i64> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.iv.nxv2i64.i32.i32.iXLen(i32, <vscale x 2 x i64>, i32, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_iv_e64m4(<vscale x 4 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.iv.nxv4i64.i32.i32.iXLen(i32 3, <vscale x 4 x i64> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.iv.nxv4i64.i32.i32.iXLen(i32, <vscale x 4 x i64>, i32, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_iv_e64m8(<vscale x 8 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.iv.nxv8i64.i32.i32.iXLen(i32 3, <vscale x 8 x i64> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.iv.nxv8i64.i32.i32.iXLen(i32, <vscale x 8 x i64>, i32, iXLen)
+
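+; Tests for the void-returning llvm.riscv.sf.vc.fv.se intrinsics with a floating-point scalar
+; operand (half, float, or double), covering e16 through e64.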
+define void @test_sf_vc_fv_se_e16mf4(<vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.i32.nxv1i16.f16.iXLen(i32 1, i32 31, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.i32.nxv1i16.f16.iXLen(i32, i32, <vscale x 1 x i16>, half, iXLen)
+
+define void @test_sf_vc_fv_se_e16mf2(<vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.i32.nxv2i16.f16.iXLen(i32 1, i32 31, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.i32.nxv2i16.f16.iXLen(i32, i32, <vscale x 2 x i16>, half, iXLen)
+
+define void @test_sf_vc_fv_se_e16m1(<vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.i32.nxv4i16.f16.iXLen(i32 1, i32 31, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.i32.nxv4i16.f16.iXLen(i32, i32, <vscale x 4 x i16>, half, iXLen)
+
+define void @test_sf_vc_fv_se_e16m2(<vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.i32.nxv8i16.f16.iXLen(i32 1, i32 31, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.i32.nxv8i16.f16.iXLen(i32, i32, <vscale x 8 x i16>, half, iXLen)
+
+define void @test_sf_vc_fv_se_e16m4(<vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.i32.nxv16i16.f16.iXLen(i32 1, i32 31, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.i32.nxv16i16.f16.iXLen(i32, i32, <vscale x 16 x i16>, half, iXLen)
+
+define void @test_sf_vc_fv_se_e16m8(<vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.i32.nxv32i16.f16.iXLen(i32 1, i32 31, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.i32.nxv32i16.f16.iXLen(i32, i32, <vscale x 32 x i16>, half, iXLen)
+
+define void @test_sf_vc_fv_se_e32mf2(<vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.i32.nxv1i32.f32.iXLen(i32 1, i32 31, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.i32.nxv1i32.f32.iXLen(i32, i32, <vscale x 1 x i32>, float, iXLen)
+
+define void @test_sf_vc_fv_se_e32m1(<vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.i32.nxv2i32.f32.iXLen(i32 1, i32 31, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.i32.nxv2i32.f32.iXLen(i32, i32, <vscale x 2 x i32>, float, iXLen)
+
+define void @test_sf_vc_fv_se_e32m2(<vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.i32.nxv4i32.f32.iXLen(i32 1, i32 31, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.i32.nxv4i32.f32.iXLen(i32, i32, <vscale x 4 x i32>, float, iXLen)
+
+define void @test_sf_vc_fv_se_e32m4(<vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.i32.nxv8i32.f32.iXLen(i32 1, i32 31, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.i32.nxv8i32.f32.iXLen(i32, i32, <vscale x 8 x i32>, float, iXLen)
+
+define void @test_sf_vc_fv_se_e32m8(<vscale x 16 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.i32.nxv16i32.f32.iXLen(i32 1, i32 31, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.i32.nxv16i32.f32.iXLen(i32, i32, <vscale x 16 x i32>, float, iXLen)
+
+define void @test_sf_vc_fv_se_e64m1(<vscale x 1 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.i32.nxv1i64.f64.iXLen(i32 1, i32 31, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.i32.nxv1i64.f64.iXLen(i32, i32, <vscale x 1 x i64>, double, iXLen)
+
+define void @test_sf_vc_fv_se_e64m2(<vscale x 2 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.i32.nxv2i64.f64.iXLen(i32 1, i32 31, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.i32.nxv2i64.f64.iXLen(i32, i32, <vscale x 2 x i64>, double, iXLen)
+
+define void @test_sf_vc_fv_se_e64m4(<vscale x 4 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.i32.nxv4i64.f64.iXLen(i32 1, i32 31, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.i32.nxv4i64.f64.iXLen(i32, i32, <vscale x 4 x i64>, double, iXLen)
+
+define void @test_sf_vc_fv_se_e64m8(<vscale x 8 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.i32.nxv8i64.f64.iXLen(i32 1, i32 31, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.i32.nxv8i64.f64.iXLen(i32, i32, <vscale x 8 x i64>, double, iXLen)
+
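+; Tests for the llvm.riscv.sf.vc.v.fv.se intrinsics, which return a result vector.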
+define <vscale x 1 x i16> @test_sf_vc_v_fv_se_e16mf4(<vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv1i16.i32.f16.iXLen(i32 1, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv1i16.i32.f16.iXLen(i32, <vscale x 1 x i16>, half, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_fv_se_e16mf2(<vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv2i16.i32.f16.iXLen(i32 1, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv2i16.i32.f16.iXLen(i32, <vscale x 2 x i16>, half, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_fv_se_e16m1(<vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv4i16.i32.f16.iXLen(i32 1, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv4i16.i32.f16.iXLen(i32, <vscale x 4 x i16>, half, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_fv_se_e16m2(<vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv8i16.i32.f16.iXLen(i32 1, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv8i16.i32.f16.iXLen(i32, <vscale x 8 x i16>, half, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_fv_se_e16m4(<vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv16i16.i32.f16.iXLen(i32 1, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv16i16.i32.f16.iXLen(i32, <vscale x 16 x i16>, half, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_fv_se_e16m8(<vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv32i16.i32.f16.iXLen(i32 1, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv32i16.i32.f16.iXLen(i32, <vscale x 32 x i16>, half, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_fv_se_e32mf2(<vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv1i32.i32.f32.iXLen(i32 1, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv1i32.i32.f32.iXLen(i32, <vscale x 1 x i32>, float, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_fv_se_e32m1(<vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv2i32.i32.f32.iXLen(i32 1, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv2i32.i32.f32.iXLen(i32, <vscale x 2 x i32>, float, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_fv_se_e32m2(<vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv4i32.i32.f32.iXLen(i32 1, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv4i32.i32.f32.iXLen(i32, <vscale x 4 x i32>, float, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_fv_se_e32m4(<vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv8i32.i32.f32.iXLen(i32 1, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv8i32.i32.f32.iXLen(i32, <vscale x 8 x i32>, float, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_fv_se_e32m8(<vscale x 16 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv16i32.i32.f32.iXLen(i32 1, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv16i32.i32.f32.iXLen(i32, <vscale x 16 x i32>, float, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_fv_se_e64m1(<vscale x 1 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv1i64.i32.f64.iXLen(i32 1, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv1i64.i32.f64.iXLen(i32, <vscale x 1 x i64>, double, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_fv_se_e64m2(<vscale x 2 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv2i64.i32.f64.iXLen(i32 1, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv2i64.i32.f64.iXLen(i32, <vscale x 2 x i64>, double, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_fv_se_e64m4(<vscale x 4 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv4i64.i32.f64.iXLen(i32 1, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv4i64.i32.f64.iXLen(i32, <vscale x 4 x i64>, double, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_fv_se_e64m8(<vscale x 8 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv8i64.i32.f64.iXLen(i32 1, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv8i64.i32.f64.iXLen(i32, <vscale x 8 x i64>, double, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_fv_e16mf4(<vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fv.nxv1i16.i32.f16.iXLen(i32 1, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fv.nxv1i16.i32.f16.iXLen(i32, <vscale x 1 x i16>, half, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_fv_e16mf2(<vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fv.nxv2i16.i32.f16.iXLen(i32 1, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fv.nxv2i16.i32.f16.iXLen(i32, <vscale x 2 x i16>, half, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_fv_e16m1(<vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fv.nxv4i16.i32.f16.iXLen(i32 1, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fv.nxv4i16.i32.f16.iXLen(i32, <vscale x 4 x i16>, half, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_fv_e16m2(<vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fv.nxv8i16.i32.f16.iXLen(i32 1, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fv.nxv8i16.i32.f16.iXLen(i32, <vscale x 8 x i16>, half, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_fv_e16m4(<vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fv.nxv16i16.i32.f16.iXLen(i32 1, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fv.nxv16i16.i32.f16.iXLen(i32, <vscale x 16 x i16>, half, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_fv_e16m8(<vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fv.nxv32i16.i32.f16.iXLen(i32 1, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fv.nxv32i16.i32.f16.iXLen(i32, <vscale x 32 x i16>, half, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_fv_e32mf2(<vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fv.nxv1i32.i32.f32.iXLen(i32 1, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fv.nxv1i32.i32.f32.iXLen(i32, <vscale x 1 x i32>, float, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_fv_e32m1(<vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fv.nxv2i32.i32.f32.iXLen(i32 1, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fv.nxv2i32.i32.f32.iXLen(i32, <vscale x 2 x i32>, float, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_fv_e32m2(<vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.nxv4i32.i32.f32.iXLen(i32 1, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.nxv4i32.i32.f32.iXLen(i32, <vscale x 4 x i32>, float, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_fv_e32m4(<vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fv.nxv8i32.i32.f32.iXLen(i32 1, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fv.nxv8i32.i32.f32.iXLen(i32, <vscale x 8 x i32>, float, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_fv_e32m8(<vscale x 16 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fv.nxv16i32.i32.f32.iXLen(i32 1, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fv.nxv16i32.i32.f32.iXLen(i32, <vscale x 16 x i32>, float, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_fv_e64m1(<vscale x 1 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fv.nxv1i64.i32.f64.iXLen(i32 1, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fv.nxv1i64.i32.f64.iXLen(i32, <vscale x 1 x i64>, double, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_fv_e64m2(<vscale x 2 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fv.nxv2i64.i32.f64.iXLen(i32 1, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fv.nxv2i64.i32.f64.iXLen(i32, <vscale x 2 x i64>, double, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_fv_e64m4(<vscale x 4 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fv.nxv4i64.i32.f64.iXLen(i32 1, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fv.nxv4i64.i32.f64.iXLen(i32, <vscale x 4 x i64>, double, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_fv_e64m8(<vscale x 8 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.nxv8i64.i32.f64.iXLen(i32 1, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.nxv8i64.i32.f64.iXLen(i32, <vscale x 8 x i64>, double, iXLen)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv-rv64.ll
new file mode 100644
index 0000000000000..607bc1ebadb8a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv-rv64.ll
@@ -0,0 +1,159 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN:  llc -mtriple=riscv64 -mattr=+v,+xsfvcp \
+; RUN:    -verify-machineinstrs < %s | FileCheck %s
+
+define void @test_sf_vc_xvv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i64.i64.i64(i32 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, i64 %rs1, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i64.i64.i64(i32, <vscale x 1 x i64>, <vscale x 1 x i64>, i64, i64)
+
+define void @test_sf_vc_xvv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i64.i64.i64(i32 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, i64 %rs1, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i64.i64.i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, i64, i64)
+
+define void @test_sf_vc_xvv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i64.i64.i64(i32 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, i64 %rs1, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i64.i64.i64(i32, <vscale x 4 x i64>, <vscale x 4 x i64>, i64, i64)
+
+define void @test_sf_vc_xvv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i64.i64.i64(i32 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, i64 %rs1, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i64.i64.i64(i32, <vscale x 8 x i64>, <vscale x 8 x i64>, i64, i64)
+
+define <vscale x 1 x i64> @test_sf_vc_v_xvv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvv.se.nxv1i64.i32.i64.i64(i32 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, i64 %rs1, i64 %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvv.se.nxv1i64.i32.i64.i64(i32, <vscale x 1 x i64>, <vscale x 1 x i64>, i64, i64)
+
+define <vscale x 2 x i64> @test_sf_vc_v_xvv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvv.se.nxv2i64.i32.i64.i64(i32 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, i64 %rs1, i64 %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvv.se.nxv2i64.i32.i64.i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, i64, i64)
+
+define <vscale x 4 x i64> @test_sf_vc_v_xvv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvv.se.nxv4i64.i32.i64.i64(i32 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, i64 %rs1, i64 %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvv.se.nxv4i64.i32.i64.i64(i32, <vscale x 4 x i64>, <vscale x 4 x i64>, i64, i64)
+
+define <vscale x 8 x i64> @test_sf_vc_v_xvv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvv.se.nxv8i64.i32.i64.i64(i32 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, i64 %rs1, i64 %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvv.se.nxv8i64.i32.i64.i64(i32, <vscale x 8 x i64>, <vscale x 8 x i64>, i64, i64)
+
+define <vscale x 1 x i64> @test_sf_vc_v_xvv_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvv.nxv1i64.i32.i64.i64(i32 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, i64 %rs1, i64 %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvv.nxv1i64.i32.i64.i64(i32, <vscale x 1 x i64>, <vscale x 1 x i64>, i64, i64)
+
+define <vscale x 2 x i64> @test_sf_vc_v_xvv_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvv.nxv2i64.i32.i64.i64(i32 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, i64 %rs1, i64 %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvv.nxv2i64.i32.i64.i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, i64, i64)
+
+define <vscale x 4 x i64> @test_sf_vc_v_xvv_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvv.nxv4i64.i32.i64.i64(i32 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, i64 %rs1, i64 %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvv.nxv4i64.i32.i64.i64(i32, <vscale x 4 x i64>, <vscale x 4 x i64>, i64, i64)
+
+define <vscale x 8 x i64> @test_sf_vc_v_xvv_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, i64 %rs1, i64 %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvv.nxv8i64.i32.i64.i64(i32 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, i64 %rs1, i64 %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvv.nxv8i64.i32.i64.i64(i32, <vscale x 8 x i64>, <vscale x 8 x i64>, i64, i64)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll
new file mode 100644
index 0000000000000..bb23f3cbfa52a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll
@@ -0,0 +1,3020 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN:  sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+xsfvcp \
+; RUN:    -verify-machineinstrs | FileCheck %s
+; RUN:  sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+xsfvcp \
+; RUN:    -verify-machineinstrs | FileCheck %s
+
+define void @test_sf_vc_vvv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i8.nxv1i8.iXLen(i32 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i8.nxv1i8.iXLen(i32, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define void @test_sf_vc_vvv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i8.nxv2i8.iXLen(i32 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i8.nxv2i8.iXLen(i32, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define void @test_sf_vc_vvv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i8.nxv4i8.iXLen(i32 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i8.nxv4i8.iXLen(i32, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define void @test_sf_vc_vvv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i8.nxv8i8.iXLen(i32 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i8.nxv8i8.iXLen(i32, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define void @test_sf_vc_vvv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.i32.nxv16i8.nxv16i8.iXLen(i32 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.i32.nxv16i8.nxv16i8.iXLen(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define void @test_sf_vc_vvv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.i32.nxv32i8.nxv32i8.iXLen(i32 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.i32.nxv32i8.nxv32i8.iXLen(i32, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define void @test_sf_vc_vvv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8r.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.i32.nxv64i8.nxv64i8.iXLen(i32 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.i32.nxv64i8.nxv64i8.iXLen(i32, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
+
+define void @test_sf_vc_vvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i16.nxv1i16.iXLen(i32 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i16.nxv1i16.iXLen(i32, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define void @test_sf_vc_vvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i16.nxv2i16.iXLen(i32 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i16.nxv2i16.iXLen(i32, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define void @test_sf_vc_vvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i16.nxv4i16.iXLen(i32 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i16.nxv4i16.iXLen(i32, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define void @test_sf_vc_vvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i16.nxv8i16.iXLen(i32 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i16.nxv8i16.iXLen(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define void @test_sf_vc_vvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.i32.nxv16i16.nxv16i16.iXLen(i32 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.i32.nxv16i16.nxv16i16.iXLen(i32, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define void @test_sf_vc_vvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.i32.nxv32i16.nxv32i16.iXLen(i32 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.i32.nxv32i16.nxv32i16.iXLen(i32, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
+
+define void @test_sf_vc_vvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i32.nxv1i32.iXLen(i32 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i32.nxv1i32.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
+
+define void @test_sf_vc_vvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i32.nxv2i32.iXLen(i32 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i32.nxv2i32.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
+
+define void @test_sf_vc_vvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i32.nxv4i32.iXLen(i32 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i32.nxv4i32.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
+
+define void @test_sf_vc_vvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i32.nxv8i32.iXLen(i32 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i32.nxv8i32.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
+
+define void @test_sf_vc_vvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.i32.nxv16i32.nxv16i32.iXLen(i32 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.i32.nxv16i32.nxv16i32.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
+
+define void @test_sf_vc_vvv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i64.nxv1i64.iXLen(i32 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i64.nxv1i64.iXLen(i32, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
+
+define void @test_sf_vc_vvv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i64.nxv2i64.iXLen(i32 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i64.nxv2i64.iXLen(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
+
+define void @test_sf_vc_vvv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i64.nxv4i64.iXLen(i32 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i64.nxv4i64.iXLen(i32, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
+
+define void @test_sf_vc_vvv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i64.nxv8i64.iXLen(i32 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i64.nxv8i64.iXLen(i32, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_vvv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.i32.nxv1i8.iXLen(i32 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
+  ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.i32.nxv1i8.iXLen(i32, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_vvv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.i32.nxv2i8.iXLen(i32 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
+  ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.i32.nxv2i8.iXLen(i32, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_vvv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.i32.nxv4i8.iXLen(i32 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
+  ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.i32.nxv4i8.iXLen(i32, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_vvv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.i32.nxv8i8.iXLen(i32 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
+  ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.i32.nxv8i8.iXLen(i32, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_vvv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.i32.nxv16i8.iXLen(i32 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
+  ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.i32.nxv16i8.iXLen(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_vvv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.i32.nxv32i8.iXLen(i32 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
+  ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.i32.nxv32i8.iXLen(i32, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_vvv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8r.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.i32.nxv64i8.iXLen(i32 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl)
+  ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.i32.nxv64i8.iXLen(i32, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_vvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.i32.nxv1i16.iXLen(i32 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.i32.nxv1i16.iXLen(i32, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_vvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.i32.nxv2i16.iXLen(i32 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.i32.nxv2i16.iXLen(i32, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_vvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.i32.nxv4i16.iXLen(i32 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.i32.nxv4i16.iXLen(i32, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_vvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.i32.nxv8i16.iXLen(i32 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.i32.nxv8i16.iXLen(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_vvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.i32.nxv16i16.iXLen(i32 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.i32.nxv16i16.iXLen(i32, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_vvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.i32.nxv32i16.iXLen(i32 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.i32.nxv32i16.iXLen(i32, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_vvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.i32.nxv1i32.iXLen(i32 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.i32.nxv1i32.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_vvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.i32.nxv2i32.iXLen(i32 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.i32.nxv2i32.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_vvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.i32.nxv4i32.iXLen(i32 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.i32.nxv4i32.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_vvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.i32.nxv8i32.iXLen(i32 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.i32.nxv8i32.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_vvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.i32.nxv16i32.iXLen(i32 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.i32.nxv16i32.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_vvv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.i32.nxv1i64.iXLen(i32 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.i32.nxv1i64.iXLen(i32, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_vvv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.i32.nxv2i64.iXLen(i32 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.i32.nxv2i64.iXLen(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_vvv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.i32.nxv4i64.iXLen(i32 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.i32.nxv4i64.iXLen(i32, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_vvv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.i32.nxv8i64.iXLen(i32 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.i32.nxv8i64.iXLen(i32, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_vvv_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.nxv1i8.i32.nxv1i8.iXLen(i32 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
+  ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.nxv1i8.i32.nxv1i8.iXLen(i32, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_vvv_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.nxv2i8.i32.nxv2i8.iXLen(i32 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
+  ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.nxv2i8.i32.nxv2i8.iXLen(i32, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_vvv_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.nxv4i8.i32.nxv4i8.iXLen(i32 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
+  ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.nxv4i8.i32.nxv4i8.iXLen(i32, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_vvv_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.nxv8i8.i32.nxv8i8.iXLen(i32 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
+  ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.nxv8i8.i32.nxv8i8.iXLen(i32, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_vvv_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.nxv16i8.i32.nxv16i8.iXLen(i32 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
+  ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.nxv16i8.i32.nxv16i8.iXLen(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_vvv_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.nxv32i8.i32.nxv32i8.iXLen(i32 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
+  ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.nxv32i8.i32.nxv32i8.iXLen(i32, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_vvv_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8r.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.nxv64i8.i32.nxv64i8.iXLen(i32 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl)
+  ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.nxv64i8.i32.nxv64i8.iXLen(i32, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_vvv_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.nxv1i16.i32.nxv1i16.iXLen(i32 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.nxv1i16.i32.nxv1i16.iXLen(i32, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_vvv_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.nxv2i16.i32.nxv2i16.iXLen(i32 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.nxv2i16.i32.nxv2i16.iXLen(i32, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_vvv_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.nxv4i16.i32.nxv4i16.iXLen(i32 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.nxv4i16.i32.nxv4i16.iXLen(i32, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_vvv_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.nxv8i16.i32.nxv8i16.iXLen(i32 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.nxv8i16.i32.nxv8i16.iXLen(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_vvv_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.nxv16i16.i32.nxv16i16.iXLen(i32 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.nxv16i16.i32.nxv16i16.iXLen(i32, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_vvv_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.nxv32i16.i32.nxv32i16.iXLen(i32 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.nxv32i16.i32.nxv32i16.iXLen(i32, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_vvv_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.nxv1i32.i32.nxv1i32.iXLen(i32 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.nxv1i32.i32.nxv1i32.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_vvv_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.nxv2i32.i32.nxv2i32.iXLen(i32 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.nxv2i32.i32.nxv2i32.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_vvv_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.nxv4i32.i32.nxv4i32.iXLen(i32 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.nxv4i32.i32.nxv4i32.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_vvv_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.nxv8i32.i32.nxv8i32.iXLen(i32 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.nxv8i32.i32.nxv8i32.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_vvv_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.nxv16i32.i32.nxv16i32.iXLen(i32 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.nxv16i32.i32.nxv16i32.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_vvv_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.nxv1i64.i32.nxv1i64.iXLen(i32 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.nxv1i64.i32.nxv1i64.iXLen(i32, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_vvv_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.nxv2i64.i32.nxv2i64.iXLen(i32 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.nxv2i64.i32.nxv2i64.iXLen(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_vvv_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.nxv4i64.i32.nxv4i64.iXLen(i32 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.nxv4i64.i32.nxv4i64.iXLen(i32, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_vvv_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.i32.nxv8i64.iXLen(i32 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.i32.nxv8i64.iXLen(i32, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
+
+define void @test_sf_vc_xvv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i8.i8.iXLen(i32 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i8.i8.iXLen(i32, <vscale x 1 x i8>, <vscale x 1 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i8.i8.iXLen(i32 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i8.i8.iXLen(i32, <vscale x 2 x i8>, <vscale x 2 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i8.i8.iXLen(i32 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i8.i8.iXLen(i32, <vscale x 4 x i8>, <vscale x 4 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i8.i8.iXLen(i32 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i8.i8.iXLen(i32, <vscale x 8 x i8>, <vscale x 8 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i8.i8.iXLen(i32 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i8.i8.iXLen(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv32i8.i8.iXLen(i32 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv32i8.i8.iXLen(i32, <vscale x 32 x i8>, <vscale x 32 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv64i8.i8.iXLen(i32 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv64i8.i8.iXLen(i32, <vscale x 64 x i8>, <vscale x 64 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i16.i16.iXLen(i32 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i16.i16.iXLen(i32, <vscale x 1 x i16>, <vscale x 1 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i16.i16.iXLen(i32 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i16.i16.iXLen(i32, <vscale x 2 x i16>, <vscale x 2 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i16.i16.iXLen(i32 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i16.i16.iXLen(i32, <vscale x 4 x i16>, <vscale x 4 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i16.i16.iXLen(i32 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i16.i16.iXLen(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i16.i16.iXLen(i32 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i16.i16.iXLen(i32, <vscale x 16 x i16>, <vscale x 16 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv32i16.i16.iXLen(i32 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv32i16.i16.iXLen(i32, <vscale x 32 x i16>, <vscale x 32 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i32.i32.iXLen(i32 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i32.i32.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i32.i32.iXLen(i32 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i32.i32.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i32.i32.iXLen(i32 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i32.i32.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i32.i32.iXLen(i32 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i32.i32.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.xvv 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i32.i32.iXLen(i32 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i32.i32.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i32>, i32, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_xvv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.i32.i8.iXLen(i32 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.i32.i8.iXLen(i32, <vscale x 1 x i8>, <vscale x 1 x i8>, i8, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_xvv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.i32.i8.iXLen(i32 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.i32.i8.iXLen(i32, <vscale x 2 x i8>, <vscale x 2 x i8>, i8, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_xvv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.i32.i8.iXLen(i32 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.i32.i8.iXLen(i32, <vscale x 4 x i8>, <vscale x 4 x i8>, i8, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_xvv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.i32.i8.iXLen(i32 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.i32.i8.iXLen(i32, <vscale x 8 x i8>, <vscale x 8 x i8>, i8, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_xvv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.i32.i8.iXLen(i32 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.i32.i8.iXLen(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, i8, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_xvv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.i32.i8.iXLen(i32 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.i32.i8.iXLen(i32, <vscale x 32 x i8>, <vscale x 32 x i8>, i8, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_xvv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.i32.i8.iXLen(i32 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.i32.i8.iXLen(i32, <vscale x 64 x i8>, <vscale x 64 x i8>, i8, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_xvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.i32.i16.iXLen(i32 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.i32.i16.iXLen(i32, <vscale x 1 x i16>, <vscale x 1 x i16>, i16, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_xvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.i32.i16.iXLen(i32 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.i32.i16.iXLen(i32, <vscale x 2 x i16>, <vscale x 2 x i16>, i16, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_xvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.i32.i16.iXLen(i32 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.i32.i16.iXLen(i32, <vscale x 4 x i16>, <vscale x 4 x i16>, i16, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_xvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.i32.i16.iXLen(i32 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.i32.i16.iXLen(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, i16, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_xvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.i32.i16.iXLen(i32 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.i32.i16.iXLen(i32, <vscale x 16 x i16>, <vscale x 16 x i16>, i16, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_xvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.i32.i16.iXLen(i32 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.i32.i16.iXLen(i32, <vscale x 32 x i16>, <vscale x 32 x i16>, i16, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_xvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.i32.i32.iXLen(i32 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.i32.i32.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i32>, i32, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_xvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.i32.i32.iXLen(i32 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.i32.i32.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i32>, i32, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_xvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.i32.i32.iXLen(i32 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.i32.i32.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, i32, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_xvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.i32.i32.iXLen(i32 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.i32.i32.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i32>, i32, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_xvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.i32.i32.iXLen(i32 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.i32.i32.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i32>, i32, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_xvv_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.nxv1i8.i32.i8.iXLen(i32 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.nxv1i8.i32.i8.iXLen(i32, <vscale x 1 x i8>, <vscale x 1 x i8>, i8, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_xvv_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.nxv2i8.i32.i8.iXLen(i32 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.nxv2i8.i32.i8.iXLen(i32, <vscale x 2 x i8>, <vscale x 2 x i8>, i8, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_xvv_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.nxv4i8.i32.i8.iXLen(i32 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.nxv4i8.i32.i8.iXLen(i32, <vscale x 4 x i8>, <vscale x 4 x i8>, i8, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_xvv_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.nxv8i8.i32.i8.iXLen(i32 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.nxv8i8.i32.i8.iXLen(i32, <vscale x 8 x i8>, <vscale x 8 x i8>, i8, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_xvv_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.nxv16i8.i32.i8.iXLen(i32 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.nxv16i8.i32.i8.iXLen(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, i8, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_xvv_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.nxv32i8.i32.i8.iXLen(i32 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.nxv32i8.i32.i8.iXLen(i32, <vscale x 32 x i8>, <vscale x 32 x i8>, i8, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_xvv_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.nxv64i8.i32.i8.iXLen(i32 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.nxv64i8.i32.i8.iXLen(i32, <vscale x 64 x i8>, <vscale x 64 x i8>, i8, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_xvv_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.nxv1i16.i32.i16.iXLen(i32 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.nxv1i16.i32.i16.iXLen(i32, <vscale x 1 x i16>, <vscale x 1 x i16>, i16, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_xvv_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.nxv2i16.i32.i16.iXLen(i32 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.nxv2i16.i32.i16.iXLen(i32, <vscale x 2 x i16>, <vscale x 2 x i16>, i16, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_xvv_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.nxv4i16.i32.i16.iXLen(i32 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.nxv4i16.i32.i16.iXLen(i32, <vscale x 4 x i16>, <vscale x 4 x i16>, i16, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_xvv_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.nxv8i16.i32.i16.iXLen(i32 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.nxv8i16.i32.i16.iXLen(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, i16, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_xvv_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.nxv16i16.i32.i16.iXLen(i32 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.nxv16i16.i32.i16.iXLen(i32, <vscale x 16 x i16>, <vscale x 16 x i16>, i16, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_xvv_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.nxv32i16.i32.i16.iXLen(i32 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.nxv32i16.i32.i16.iXLen(i32, <vscale x 32 x i16>, <vscale x 32 x i16>, i16, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_xvv_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.nxv1i32.i32.i32.iXLen(i32 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.nxv1i32.i32.i32.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i32>, i32, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_xvv_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.nxv2i32.i32.i32.iXLen(i32 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.nxv2i32.i32.i32.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i32>, i32, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_xvv_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.nxv4i32.i32.i32.iXLen(i32 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.nxv4i32.i32.i32.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, i32, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_xvv_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.nxv8i32.i32.i32.iXLen(i32 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.nxv8i32.i32.i32.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i32>, i32, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_xvv_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.nxv16i32.i32.i32.iXLen(i32 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.nxv16i32.i32.i32.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i32>, i32, iXLen)
+
+define void @test_sf_vc_ivv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i8.i32.iXLen(i32 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i8.i32.iXLen(i32, <vscale x 1 x i8>, <vscale x 1 x i8>, i32, iXLen)
+
+define void @test_sf_vc_ivv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i8.i32.iXLen(i32 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i8.i32.iXLen(i32, <vscale x 2 x i8>, <vscale x 2 x i8>, i32, iXLen)
+
+define void @test_sf_vc_ivv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i8.i32.iXLen(i32 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i8.i32.iXLen(i32, <vscale x 4 x i8>, <vscale x 4 x i8>, i32, iXLen)
+
+define void @test_sf_vc_ivv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i8.i32.iXLen(i32 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i8.i32.iXLen(i32, <vscale x 8 x i8>, <vscale x 8 x i8>, i32, iXLen)
+
+define void @test_sf_vc_ivv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.i32.nxv16i8.i32.iXLen(i32 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.i32.nxv16i8.i32.iXLen(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, i32, iXLen)
+
+define void @test_sf_vc_ivv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.i32.nxv32i8.i32.iXLen(i32 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.i32.nxv32i8.i32.iXLen(i32, <vscale x 32 x i8>, <vscale x 32 x i8>, i32, iXLen)
+
+define void @test_sf_vc_ivv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.i32.nxv64i8.i32.iXLen(i32 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.i32.nxv64i8.i32.iXLen(i32, <vscale x 64 x i8>, <vscale x 64 x i8>, i32, iXLen)
+
+define void @test_sf_vc_ivv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i16.i32.iXLen(i32 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i16.i32.iXLen(i32, <vscale x 1 x i16>, <vscale x 1 x i16>, i32, iXLen)
+
+define void @test_sf_vc_ivv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i16.i32.iXLen(i32 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i16.i32.iXLen(i32, <vscale x 2 x i16>, <vscale x 2 x i16>, i32, iXLen)
+
+define void @test_sf_vc_ivv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i16.i32.iXLen(i32 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i16.i32.iXLen(i32, <vscale x 4 x i16>, <vscale x 4 x i16>, i32, iXLen)
+
+define void @test_sf_vc_ivv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i16.i32.iXLen(i32 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i16.i32.iXLen(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, i32, iXLen)
+
+define void @test_sf_vc_ivv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.i32.nxv16i16.i32.iXLen(i32 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.i32.nxv16i16.i32.iXLen(i32, <vscale x 16 x i16>, <vscale x 16 x i16>, i32, iXLen)
+
+define void @test_sf_vc_ivv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.i32.nxv32i16.i32.iXLen(i32 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.i32.nxv32i16.i32.iXLen(i32, <vscale x 32 x i16>, <vscale x 32 x i16>, i32, iXLen)
+
+define void @test_sf_vc_ivv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i32.i32.iXLen(i32 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i32.i32.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i32>, i32, iXLen)
+
+define void @test_sf_vc_ivv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i32.i32.iXLen(i32 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i32.i32.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i32>, i32, iXLen)
+
+define void @test_sf_vc_ivv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i32.i32.iXLen(i32 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i32.i32.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, i32, iXLen)
+
+define void @test_sf_vc_ivv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i32.i32.iXLen(i32 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i32.i32.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i32>, i32, iXLen)
+
+define void @test_sf_vc_ivv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.i32.nxv16i32.i32.iXLen(i32 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.i32.nxv16i32.i32.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i32>, i32, iXLen)
+
+define void @test_sf_vc_ivv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i64.i32.iXLen(i32 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i64.i32.iXLen(i32, <vscale x 1 x i64>, <vscale x 1 x i64>, i32, iXLen)
+
+define void @test_sf_vc_ivv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i64.i32.iXLen(i32 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i64.i32.iXLen(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, i32, iXLen)
+
+define void @test_sf_vc_ivv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i64.i32.iXLen(i32 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i64.i32.iXLen(i32, <vscale x 4 x i64>, <vscale x 4 x i64>, i32, iXLen)
+
+define void @test_sf_vc_ivv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i64.i32.iXLen(i32 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i64.i32.iXLen(i32, <vscale x 8 x i64>, <vscale x 8 x i64>, i32, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_ivv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.i32.i32.iXLen(i32 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.i32.i32.iXLen(i32, <vscale x 1 x i8>, <vscale x 1 x i8>, i32, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_ivv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.i32.i32.iXLen(i32 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.i32.i32.iXLen(i32, <vscale x 2 x i8>, <vscale x 2 x i8>, i32, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_ivv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.i32.i32.iXLen(i32 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.i32.i32.iXLen(i32, <vscale x 4 x i8>, <vscale x 4 x i8>, i32, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_ivv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.i32.i32.iXLen(i32 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.i32.i32.iXLen(i32, <vscale x 8 x i8>, <vscale x 8 x i8>, i32, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_ivv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.i32.i32.iXLen(i32 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.i32.i32.iXLen(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, i32, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_ivv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.i32.i32.iXLen(i32 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.i32.i32.iXLen(i32, <vscale x 32 x i8>, <vscale x 32 x i8>, i32, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_ivv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.i32.i32.iXLen(i32 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.i32.i32.iXLen(i32, <vscale x 64 x i8>, <vscale x 64 x i8>, i32, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_ivv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.i32.i32.iXLen(i32 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.i32.i32.iXLen(i32, <vscale x 1 x i16>, <vscale x 1 x i16>, i32, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_ivv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.i32.i32.iXLen(i32 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.i32.i32.iXLen(i32, <vscale x 2 x i16>, <vscale x 2 x i16>, i32, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_ivv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.i32.i32.iXLen(i32 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.i32.i32.iXLen(i32, <vscale x 4 x i16>, <vscale x 4 x i16>, i32, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_ivv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.i32.i32.iXLen(i32 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.i32.i32.iXLen(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, i32, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_ivv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.i32.i32.iXLen(i32 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.i32.i32.iXLen(i32, <vscale x 16 x i16>, <vscale x 16 x i16>, i32, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_ivv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.i32.i32.iXLen(i32 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.i32.i32.iXLen(i32, <vscale x 32 x i16>, <vscale x 32 x i16>, i32, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_ivv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.i32.i32.iXLen(i32 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.i32.i32.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i32>, i32, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_ivv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.i32.i32.iXLen(i32 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.i32.i32.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i32>, i32, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_ivv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.i32.i32.iXLen(i32 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.i32.i32.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, i32, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_ivv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.i32.i32.iXLen(i32 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.i32.i32.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i32>, i32, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_ivv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.i32.i32.iXLen(i32 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.i32.i32.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i32>, i32, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_ivv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.i32.i32.iXLen(i32 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.i32.i32.iXLen(i32, <vscale x 1 x i64>, <vscale x 1 x i64>, i32, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_ivv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.i32.i32.iXLen(i32 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.i32.i32.iXLen(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, i32, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_ivv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.i32.i32.iXLen(i32 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.i32.i32.iXLen(i32, <vscale x 4 x i64>, <vscale x 4 x i64>, i32, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_ivv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.i32.i32.iXLen(i32 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.i32.i32.iXLen(i32, <vscale x 8 x i64>, <vscale x 8 x i64>, i32, iXLen)
+
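+; Non-.se variants follow. The .se suffix presumably marks the side-effecting form of
+; the intrinsic, so sf.vc.v.ivv is expected to select the same instruction and produce
+; checks identical to the .se tests above.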
+define <vscale x 1 x i8> @test_sf_vc_v_ivv_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.nxv1i8.i32.i32.iXLen(i32 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.nxv1i8.i32.i32.iXLen(i32, <vscale x 1 x i8>, <vscale x 1 x i8>, i32, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_ivv_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.nxv2i8.i32.i32.iXLen(i32 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.nxv2i8.i32.i32.iXLen(i32, <vscale x 2 x i8>, <vscale x 2 x i8>, i32, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_ivv_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.nxv4i8.i32.i32.iXLen(i32 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.nxv4i8.i32.i32.iXLen(i32, <vscale x 4 x i8>, <vscale x 4 x i8>, i32, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_ivv_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.nxv8i8.i32.i32.iXLen(i32 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.nxv8i8.i32.i32.iXLen(i32, <vscale x 8 x i8>, <vscale x 8 x i8>, i32, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_ivv_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.nxv16i8.i32.i32.iXLen(i32 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.nxv16i8.i32.i32.iXLen(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, i32, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_ivv_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.nxv32i8.i32.i32.iXLen(i32 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.nxv32i8.i32.i32.iXLen(i32, <vscale x 32 x i8>, <vscale x 32 x i8>, i32, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_ivv_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e8m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.nxv64i8.i32.i32.iXLen(i32 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.nxv64i8.i32.i32.iXLen(i32, <vscale x 64 x i8>, <vscale x 64 x i8>, i32, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_ivv_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.nxv1i16.i32.i32.iXLen(i32 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.nxv1i16.i32.i32.iXLen(i32, <vscale x 1 x i16>, <vscale x 1 x i16>, i32, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_ivv_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.nxv2i16.i32.i32.iXLen(i32 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.nxv2i16.i32.i32.iXLen(i32, <vscale x 2 x i16>, <vscale x 2 x i16>, i32, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_ivv_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.nxv4i16.i32.i32.iXLen(i32 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.nxv4i16.i32.i32.iXLen(i32, <vscale x 4 x i16>, <vscale x 4 x i16>, i32, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_ivv_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.nxv8i16.i32.i32.iXLen(i32 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.nxv8i16.i32.i32.iXLen(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, i32, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_ivv_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.nxv16i16.i32.i32.iXLen(i32 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.nxv16i16.i32.i32.iXLen(i32, <vscale x 16 x i16>, <vscale x 16 x i16>, i32, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_ivv_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.nxv32i16.i32.i32.iXLen(i32 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.nxv32i16.i32.i32.iXLen(i32, <vscale x 32 x i16>, <vscale x 32 x i16>, i32, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_ivv_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.nxv1i32.i32.i32.iXLen(i32 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.nxv1i32.i32.i32.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i32>, i32, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_ivv_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.nxv2i32.i32.i32.iXLen(i32 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.nxv2i32.i32.i32.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i32>, i32, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_ivv_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.nxv4i32.i32.i32.iXLen(i32 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.nxv4i32.i32.i32.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, i32, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_ivv_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.nxv8i32.i32.i32.iXLen(i32 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.nxv8i32.i32.i32.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i32>, i32, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_ivv_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.nxv16i32.i32.i32.iXLen(i32 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.nxv16i32.i32.i32.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i32>, i32, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_ivv_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.nxv1i64.i32.i32.iXLen(i32 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.nxv1i64.i32.i32.iXLen(i32, <vscale x 1 x i64>, <vscale x 1 x i64>, i32, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_ivv_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.nxv2i64.i32.i32.iXLen(i32 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.nxv2i64.i32.i32.iXLen(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, i32, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_ivv_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.nxv4i64.i32.i32.iXLen(i32 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.nxv4i64.i32.i32.iXLen(i32, <vscale x 4 x i64>, <vscale x 4 x i64>, i32, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_ivv_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.nxv8i64.i32.i32.iXLen(i32 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.nxv8i64.i32.i32.iXLen(i32, <vscale x 8 x i64>, <vscale x 8 x i64>, i32, iXLen)
+
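+; sf.vc.fvv tests: the scalar operand is a floating-point value (half/float/double)
+; passed in fa0; the half-typed cases rely on the zfh extension being enabled.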
+define void @test_sf_vc_fvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.i32.nxv1i16.f16.iXLen(i32 1, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.i32.nxv1i16.f16.iXLen(i32, <vscale x 1 x i16>, <vscale x 1 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.i32.nxv2i16.f16.iXLen(i32 1, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.i32.nxv2i16.f16.iXLen(i32, <vscale x 2 x i16>, <vscale x 2 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.i32.nxv4i16.f16.iXLen(i32 1, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.i32.nxv4i16.f16.iXLen(i32, <vscale x 4 x i16>, <vscale x 4 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.i32.nxv8i16.f16.iXLen(i32 1, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.i32.nxv8i16.f16.iXLen(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.i32.nxv16i16.f16.iXLen(i32 1, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.i32.nxv16i16.f16.iXLen(i32, <vscale x 16 x i16>, <vscale x 16 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.i32.nxv32i16.f16.iXLen(i32 1, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.i32.nxv32i16.f16.iXLen(i32, <vscale x 32 x i16>, <vscale x 32 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.i32.nxv1i32.f32.iXLen(i32 1, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.i32.nxv1i32.f32.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i32>, float, iXLen)
+
+define void @test_sf_vc_fvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.i32.nxv2i32.f32.iXLen(i32 1, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.i32.nxv2i32.f32.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i32>, float, iXLen)
+
+define void @test_sf_vc_fvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.i32.nxv4i32.f32.iXLen(i32 1, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.i32.nxv4i32.f32.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, float, iXLen)
+
+define void @test_sf_vc_fvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.i32.nxv8i32.f32.iXLen(i32 1, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.i32.nxv8i32.f32.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i32>, float, iXLen)
+
+define void @test_sf_vc_fvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.i32.nxv16i32.f32.iXLen(i32 1, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.i32.nxv16i32.f32.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i32>, float, iXLen)
+
+define void @test_sf_vc_fvv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.i32.nxv1i64.f64.iXLen(i32 1, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.i32.nxv1i64.f64.iXLen(i32, <vscale x 1 x i64>, <vscale x 1 x i64>, double, iXLen)
+
+define void @test_sf_vc_fvv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.i32.nxv2i64.f64.iXLen(i32 1, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.i32.nxv2i64.f64.iXLen(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, double, iXLen)
+
+define void @test_sf_vc_fvv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.i32.nxv4i64.f64.iXLen(i32 1, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.i32.nxv4i64.f64.iXLen(i32, <vscale x 4 x i64>, <vscale x 4 x i64>, double, iXLen)
+
+define void @test_sf_vc_fvv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.i32.nxv8i64.f64.iXLen(i32 1, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.i32.nxv8i64.f64.iXLen(i32, <vscale x 8 x i64>, <vscale x 8 x i64>, double, iXLen)
+
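+; Value-returning sf.vc.v.fvv.se tests follow, mirroring the void fvv tests above.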
+define <vscale x 1 x i16> @test_sf_vc_v_fvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.i32.f16.iXLen(i32 1, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.i32.f16.iXLen(i32, <vscale x 1 x i16>, <vscale x 1 x i16>, half, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_fvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.i32.f16.iXLen(i32 1, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.i32.f16.iXLen(i32, <vscale x 2 x i16>, <vscale x 2 x i16>, half, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_fvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.i32.f16.iXLen(i32 1, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.i32.f16.iXLen(i32, <vscale x 4 x i16>, <vscale x 4 x i16>, half, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_fvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.i32.f16.iXLen(i32 1, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.i32.f16.iXLen(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, half, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_fvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.i32.f16.iXLen(i32 1, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.i32.f16.iXLen(i32, <vscale x 16 x i16>, <vscale x 16 x i16>, half, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_fvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.i32.f16.iXLen(i32 1, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.i32.f16.iXLen(i32, <vscale x 32 x i16>, <vscale x 32 x i16>, half, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_fvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.i32.f32.iXLen(i32 1, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.i32.f32.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i32>, float, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_fvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.i32.f32.iXLen(i32 1, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.i32.f32.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i32>, float, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_fvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.i32.f32.iXLen(i32 1, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.i32.f32.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, float, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_fvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.i32.f32.iXLen(i32 1, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.i32.f32.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i32>, float, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_fvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.i32.f32.iXLen(i32 1, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.i32.f32.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i32>, float, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_fvv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.i32.f64.iXLen(i32 1, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.i32.f64.iXLen(i32, <vscale x 1 x i64>, <vscale x 1 x i64>, double, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_fvv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.i32.f64.iXLen(i32 1, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.i32.f64.iXLen(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, double, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_fvv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.i32.f64.iXLen(i32 1, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.i32.f64.iXLen(i32, <vscale x 4 x i64>, <vscale x 4 x i64>, double, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_fvv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.i32.f64.iXLen(i32 1, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.i32.f64.iXLen(i32, <vscale x 8 x i64>, <vscale x 8 x i64>, double, iXLen)
+
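+; Non-side-effecting sf.vc.v.fvv tests follow; the checks are effectively identical
+; to the .se form above.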
+define <vscale x 1 x i16> @test_sf_vc_v_fvv_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.nxv1i16.i32.f16.iXLen(i32 1, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.nxv1i16.i32.f16.iXLen(i32, <vscale x 1 x i16>, <vscale x 1 x i16>, half, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_fvv_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.nxv2i16.i32.f16.iXLen(i32 1, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.nxv2i16.i32.f16.iXLen(i32, <vscale x 2 x i16>, <vscale x 2 x i16>, half, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_fvv_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.nxv4i16.i32.f16.iXLen(i32 1, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.nxv4i16.i32.f16.iXLen(i32, <vscale x 4 x i16>, <vscale x 4 x i16>, half, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_fvv_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.nxv8i16.i32.f16.iXLen(i32 1, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.nxv8i16.i32.f16.iXLen(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, half, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_fvv_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.nxv16i16.i32.f16.iXLen(i32 1, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.nxv16i16.i32.f16.iXLen(i32, <vscale x 16 x i16>, <vscale x 16 x i16>, half, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_fvv_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.nxv32i16.i32.f16.iXLen(i32 1, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.nxv32i16.i32.f16.iXLen(i32, <vscale x 32 x i16>, <vscale x 32 x i16>, half, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_fvv_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.nxv1i32.i32.f32.iXLen(i32 1, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.nxv1i32.i32.f32.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i32>, float, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_fvv_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.nxv2i32.i32.f32.iXLen(i32 1, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.nxv2i32.i32.f32.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i32>, float, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_fvv_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.nxv4i32.i32.f32.iXLen(i32 1, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.nxv4i32.i32.f32.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, float, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_fvv_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.nxv8i32.i32.f32.iXLen(i32 1, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.nxv8i32.i32.f32.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i32>, float, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_fvv_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.nxv16i32.i32.f32.iXLen(i32 1, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.nxv16i32.i32.f32.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i32>, float, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_fvv_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.nxv1i64.i32.f64.iXLen(i32 1, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.nxv1i64.i32.f64.iXLen(i32, <vscale x 1 x i64>, <vscale x 1 x i64>, double, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_fvv_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.nxv2i64.i32.f64.iXLen(i32 1, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.nxv2i64.i32.f64.iXLen(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, double, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_fvv_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.nxv4i64.i32.f64.iXLen(i32 1, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.nxv4i64.i32.f64.iXLen(i32, <vscale x 4 x i64>, <vscale x 4 x i64>, double, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_fvv_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.nxv8i64.i32.f64.iXLen(i32 1, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.nxv8i64.i32.f64.iXLen(i32, <vscale x 8 x i64>, <vscale x 8 x i64>, double, iXLen)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll
new file mode 100644
index 0000000000000..458f07a679003
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll
@@ -0,0 +1,2111 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN:  sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+xsfvcp \
+; RUN:    -verify-machineinstrs | FileCheck %s
+; RUN:  sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+xsfvcp \
+; RUN:    -verify-machineinstrs | FileCheck %s
+
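+; The vvw tests use a widening operand pattern: the destination vector has twice the
+; element width of the two source vectors (e.g. an nxv1i16 destination with nxv1i8
+; sources), so the vsetvli is emitted for the narrower source element type.
+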
+define void @test_sf_vc_vvw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.i32.nxv1i16.nxv1i8.nxv1i8.iXLen(i32 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.i32.nxv1i16.nxv1i8.nxv1i8.iXLen(i32, <vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define void @test_sf_vc_vvw_se_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.i32.nxv2i16.nxv2i8.nxv2i8.iXLen(i32 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.i32.nxv2i16.nxv2i8.nxv2i8.iXLen(i32, <vscale x 2 x i16>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define void @test_sf_vc_vvw_se_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.i32.nxv4i16.nxv4i8.nxv4i8.iXLen(i32 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.i32.nxv4i16.nxv4i8.nxv4i8.iXLen(i32, <vscale x 4 x i16>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define void @test_sf_vc_vvw_se_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v10, v11
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.i32.nxv8i16.nxv8i8.nxv8i8.iXLen(i32 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.i32.nxv8i16.nxv8i8.nxv8i8.iXLen(i32, <vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define void @test_sf_vc_vvw_se_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v12, v14
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.i32.nxv16i16.nxv16i8.nxv16i8.iXLen(i32 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.i32.nxv16i16.nxv16i8.nxv16i8.iXLen(i32, <vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define void @test_sf_vc_vvw_se_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v16, v20
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.i32.nxv32i16.nxv32i8.nxv32i8.iXLen(i32 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.i32.nxv32i16.nxv32i8.nxv32i8.iXLen(i32, <vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define void @test_sf_vc_vvw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.i32.nxv1i32.nxv1i16.nxv1i16.iXLen(i32 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.i32.nxv1i32.nxv1i16.nxv1i16.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define void @test_sf_vc_vvw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.i32.nxv2i32.nxv2i16.nxv2i16.iXLen(i32 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.i32.nxv2i32.nxv2i16.nxv2i16.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define void @test_sf_vc_vvw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v10, v11
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.i32.nxv4i32.nxv4i16.nxv4i16.iXLen(i32 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.i32.nxv4i32.nxv4i16.nxv4i16.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define void @test_sf_vc_vvw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v12, v14
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.i32.nxv8i32.nxv8i16.nxv8i16.iXLen(i32 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.i32.nxv8i32.nxv8i16.nxv8i16.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define void @test_sf_vc_vvw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v16, v20
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.i32.nxv16i32.nxv16i16.nxv16i16.iXLen(i32 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.i32.nxv16i32.nxv16i16.nxv16i16.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define void @test_sf_vc_vvw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.i32.nxv1i64.nxv1i32.nxv1i32.iXLen(i32 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.i32.nxv1i64.nxv1i32.nxv1i32.iXLen(i32, <vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
+
+define void @test_sf_vc_vvw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v10, v11
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.i32.nxv2i64.nxv2i32.nxv2i32.iXLen(i32 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.i32.nxv2i64.nxv2i32.nxv2i32.iXLen(i32, <vscale x 2 x i64>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
+
+define void @test_sf_vc_vvw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v12, v14
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.i32.nxv4i64.nxv4i32.nxv4i32.iXLen(i32 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.i32.nxv4i64.nxv4i32.nxv4i32.iXLen(i32, <vscale x 4 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
+
+define void @test_sf_vc_vvw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v16, v20
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.i32.nxv8i64.nxv8i32.nxv8i32.iXLen(i32 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.i32.nxv8i64.nxv8i32.nxv8i32.iXLen(i32, <vscale x 8 x i64>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_vvw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.i32.nxv1i8.nxv1i8.iXLen(i32 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.i32.nxv1i8.nxv1i8.iXLen(i32, <vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_vvw_se_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.i32.nxv2i8.nxv2i8.iXLen(i32 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.i32.nxv2i8.nxv2i8.iXLen(i32, <vscale x 2 x i16>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_vvw_se_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.i32.nxv4i8.nxv4i8.iXLen(i32 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.i32.nxv4i8.nxv4i8.iXLen(i32, <vscale x 4 x i16>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_vvw_se_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.i32.nxv8i8.nxv8i8.iXLen(i32 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.i32.nxv8i8.nxv8i8.iXLen(i32, <vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_vvw_se_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.i32.nxv16i8.nxv16i8.iXLen(i32 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.i32.nxv16i8.nxv16i8.iXLen(i32, <vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_vvw_se_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.i32.nxv32i8.nxv32i8.iXLen(i32 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.i32.nxv32i8.nxv32i8.iXLen(i32, <vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_vvw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.i32.nxv1i16.nxv1i16.iXLen(i32 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.i32.nxv1i16.nxv1i16.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_vvw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.i32.nxv2i16.nxv2i16.iXLen(i32 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.i32.nxv2i16.nxv2i16.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_vvw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.i32.nxv4i16.nxv4i16.iXLen(i32 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.i32.nxv4i16.nxv4i16.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_vvw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.i32.nxv8i16.nxv8i16.iXLen(i32 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.i32.nxv8i16.nxv8i16.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_vvw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.i32.nxv16i16.nxv16i16.iXLen(i32 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.i32.nxv16i16.nxv16i16.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_vvw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.i32.nxv1i32.nxv1i32.iXLen(i32 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.i32.nxv1i32.nxv1i32.iXLen(i32, <vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_vvw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.i32.nxv2i32.nxv2i32.iXLen(i32 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.i32.nxv2i32.nxv2i32.iXLen(i32, <vscale x 2 x i64>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_vvw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.i32.nxv4i32.nxv4i32.iXLen(i32 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.i32.nxv4i32.nxv4i32.iXLen(i32, <vscale x 4 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_vvw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.i32.nxv8i32.nxv8i32.iXLen(i32 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.i32.nxv8i32.nxv8i32.iXLen(i32, <vscale x 8 x i64>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_vvw_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.nxv1i16.i32.nxv1i8.nxv1i8.iXLen(i32 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.nxv1i16.i32.nxv1i8.nxv1i8.iXLen(i32, <vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_vvw_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.nxv2i16.i32.nxv2i8.nxv2i8.iXLen(i32 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.nxv2i16.i32.nxv2i8.nxv2i8.iXLen(i32, <vscale x 2 x i16>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_vvw_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.nxv4i16.i32.nxv4i8.nxv4i8.iXLen(i32 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.nxv4i16.i32.nxv4i8.nxv4i8.iXLen(i32, <vscale x 4 x i16>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_vvw_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.nxv8i16.i32.nxv8i8.nxv8i8.iXLen(i32 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.nxv8i16.i32.nxv8i8.nxv8i8.iXLen(i32, <vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_vvw_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.nxv16i16.i32.nxv16i8.nxv16i8.iXLen(i32 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.nxv16i16.i32.nxv16i8.nxv16i8.iXLen(i32, <vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_vvw_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.nxv32i16.i32.nxv32i8.nxv32i8.iXLen(i32 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.nxv32i16.i32.nxv32i8.nxv32i8.iXLen(i32, <vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_vvw_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.nxv1i32.i32.nxv1i16.nxv1i16.iXLen(i32 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.nxv1i32.i32.nxv1i16.nxv1i16.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_vvw_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.nxv2i32.i32.nxv2i16.nxv2i16.iXLen(i32 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.nxv2i32.i32.nxv2i16.nxv2i16.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_vvw_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.nxv4i32.i32.nxv4i16.nxv4i16.iXLen(i32 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.nxv4i32.i32.nxv4i16.nxv4i16.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_vvw_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.nxv8i32.i32.nxv8i16.nxv8i16.iXLen(i32 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.nxv8i32.i32.nxv8i16.nxv8i16.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_vvw_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.nxv16i32.i32.nxv16i16.nxv16i16.iXLen(i32 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.nxv16i32.i32.nxv16i16.nxv16i16.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_vvw_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.nxv1i64.i32.nxv1i32.nxv1i32.iXLen(i32 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.nxv1i64.i32.nxv1i32.nxv1i32.iXLen(i32, <vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_vvw_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.nxv2i64.i32.nxv2i32.nxv2i32.iXLen(i32 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.nxv2i64.i32.nxv2i32.nxv2i32.iXLen(i32, <vscale x 2 x i64>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_vvw_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.nxv4i64.i32.nxv4i32.nxv4i32.iXLen(i32 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.nxv4i64.i32.nxv4i32.nxv4i32.iXLen(i32, <vscale x 4 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_vvw_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.nxv8i64.i32.nxv8i32.nxv8i32.iXLen(i32 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.nxv8i64.i32.nxv8i32.nxv8i32.iXLen(i32, <vscale x 8 x i64>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
+
+define void @test_sf_vc_xvw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.i32.nxv1i16.nxv1i8.i8.iXLen(i32 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.i32.nxv1i16.nxv1i8.i8.iXLen(i32, <vscale x 1 x i16>, <vscale x 1 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvw_se_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.i32.nxv2i16.nxv2i8.i8.iXLen(i32 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.i32.nxv2i16.nxv2i8.i8.iXLen(i32, <vscale x 2 x i16>, <vscale x 2 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvw_se_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.i32.nxv4i16.nxv4i8.i8.iXLen(i32 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.i32.nxv4i16.nxv4i8.i8.iXLen(i32, <vscale x 4 x i16>, <vscale x 4 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvw_se_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.i32.nxv8i16.nxv8i8.i8.iXLen(i32 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.i32.nxv8i16.nxv8i8.i8.iXLen(i32, <vscale x 8 x i16>, <vscale x 8 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvw_se_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.i32.nxv16i16.nxv16i8.i8.iXLen(i32 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.i32.nxv16i16.nxv16i8.i8.iXLen(i32, <vscale x 16 x i16>, <vscale x 16 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvw_se_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.i32.nxv32i16.nxv32i8.i8.iXLen(i32 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.i32.nxv32i16.nxv32i8.i8.iXLen(i32, <vscale x 32 x i16>, <vscale x 32 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.i32.nxv1i32.nxv1i16.i16.iXLen(i32 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.i32.nxv1i32.nxv1i16.i16.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.i32.nxv2i32.nxv2i16.i16.iXLen(i32 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.i32.nxv2i32.nxv2i16.i16.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.i32.nxv4i32.nxv4i16.i16.iXLen(i32 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.i32.nxv4i32.nxv4i16.i16.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.i32.nxv8i32.nxv8i16.i16.iXLen(i32 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.i32.nxv8i32.nxv8i16.i16.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.i32.nxv16i32.nxv16i16.i16.iXLen(i32 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.i32.nxv16i32.nxv16i16.i16.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.i32.nxv1i64.nxv1i32.i32.iXLen(i32 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.i32.nxv1i64.nxv1i32.i32.iXLen(i32, <vscale x 1 x i64>, <vscale x 1 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xvw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.i32.nxv2i64.nxv2i32.i32.iXLen(i32 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.i32.nxv2i64.nxv2i32.i32.iXLen(i32, <vscale x 2 x i64>, <vscale x 2 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xvw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.i32.nxv4i64.nxv4i32.i32.iXLen(i32 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.i32.nxv4i64.nxv4i32.i32.iXLen(i32, <vscale x 4 x i64>, <vscale x 4 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xvw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.i32.nxv8i64.nxv8i32.i32.iXLen(i32 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.i32.nxv8i64.nxv8i32.i32.iXLen(i32, <vscale x 8 x i64>, <vscale x 8 x i32>, i32, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_xvw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.i32.nxv1i8.i8.iXLen(i32 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.i32.nxv1i8.i8.iXLen(i32, <vscale x 1 x i16>, <vscale x 1 x i8>, i8, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_xvw_se_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.i32.nxv2i8.i8.iXLen(i32 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.i32.nxv2i8.i8.iXLen(i32, <vscale x 2 x i16>, <vscale x 2 x i8>, i8, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_xvw_se_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.i32.nxv4i8.i8.iXLen(i32 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.i32.nxv4i8.i8.iXLen(i32, <vscale x 4 x i16>, <vscale x 4 x i8>, i8, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_xvw_se_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.i32.nxv8i8.i8.iXLen(i32 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.i32.nxv8i8.i8.iXLen(i32, <vscale x 8 x i16>, <vscale x 8 x i8>, i8, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_xvw_se_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.i32.nxv16i8.i8.iXLen(i32 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.i32.nxv16i8.i8.iXLen(i32, <vscale x 16 x i16>, <vscale x 16 x i8>, i8, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_xvw_se_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.i32.nxv32i8.i8.iXLen(i32 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.i32.nxv32i8.i8.iXLen(i32, <vscale x 32 x i16>, <vscale x 32 x i8>, i8, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_xvw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.i32.nxv1i16.i16.iXLen(i32 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.i32.nxv1i16.i16.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i16>, i16, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_xvw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.i32.nxv2i16.i16.iXLen(i32 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.i32.nxv2i16.i16.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i16>, i16, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_xvw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.i32.nxv4i16.i16.iXLen(i32 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.i32.nxv4i16.i16.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i16>, i16, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_xvw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.i32.nxv8i16.i16.iXLen(i32 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.i32.nxv8i16.i16.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i16>, i16, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_xvw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.i32.nxv16i16.i16.iXLen(i32 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.i32.nxv16i16.i16.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i16>, i16, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_xvw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.i32.nxv1i32.i32.iXLen(i32 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.i32.nxv1i32.i32.iXLen(i32, <vscale x 1 x i64>, <vscale x 1 x i32>, i32, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_xvw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.i32.nxv2i32.i32.iXLen(i32 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.i32.nxv2i32.i32.iXLen(i32, <vscale x 2 x i64>, <vscale x 2 x i32>, i32, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_xvw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.i32.nxv4i32.i32.iXLen(i32 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.i32.nxv4i32.i32.iXLen(i32, <vscale x 4 x i64>, <vscale x 4 x i32>, i32, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_xvw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.i32.nxv8i32.i32.iXLen(i32 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.i32.nxv8i32.i32.iXLen(i32, <vscale x 8 x i64>, <vscale x 8 x i32>, i32, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_xvw_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.nxv1i16.i32.nxv1i8.i8.iXLen(i32 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.nxv1i16.i32.nxv1i8.i8.iXLen(i32, <vscale x 1 x i16>, <vscale x 1 x i8>, i8, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_xvw_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.nxv2i16.i32.nxv2i8.i8.iXLen(i32 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.nxv2i16.i32.nxv2i8.i8.iXLen(i32, <vscale x 2 x i16>, <vscale x 2 x i8>, i8, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_xvw_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.nxv4i16.i32.nxv4i8.i8.iXLen(i32 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.nxv4i16.i32.nxv4i8.i8.iXLen(i32, <vscale x 4 x i16>, <vscale x 4 x i8>, i8, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_xvw_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.nxv8i16.i32.nxv8i8.i8.iXLen(i32 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.nxv8i16.i32.nxv8i8.i8.iXLen(i32, <vscale x 8 x i16>, <vscale x 8 x i8>, i8, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_xvw_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.nxv16i16.i32.nxv16i8.i8.iXLen(i32 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.nxv16i16.i32.nxv16i8.i8.iXLen(i32, <vscale x 16 x i16>, <vscale x 16 x i8>, i8, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_xvw_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.nxv32i16.i32.nxv32i8.i8.iXLen(i32 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.nxv32i16.i32.nxv32i8.i8.iXLen(i32, <vscale x 32 x i16>, <vscale x 32 x i8>, i8, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_xvw_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.nxv1i32.i32.nxv1i16.i16.iXLen(i32 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.nxv1i32.i32.nxv1i16.i16.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i16>, i16, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_xvw_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.nxv2i32.i32.nxv2i16.i16.iXLen(i32 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.nxv2i32.i32.nxv2i16.i16.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i16>, i16, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_xvw_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.nxv4i32.i32.nxv4i16.i16.iXLen(i32 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.nxv4i32.i32.nxv4i16.i16.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i16>, i16, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_xvw_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.nxv8i32.i32.nxv8i16.i16.iXLen(i32 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.nxv8i32.i32.nxv8i16.i16.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i16>, i16, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_xvw_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.nxv16i32.i32.nxv16i16.i16.iXLen(i32 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.nxv16i32.i32.nxv16i16.i16.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i16>, i16, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_xvw_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.nxv1i64.i32.nxv1i32.i32.iXLen(i32 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.nxv1i64.i32.nxv1i32.i32.iXLen(i32, <vscale x 1 x i64>, <vscale x 1 x i32>, i32, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_xvw_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.nxv2i64.i32.nxv2i32.i32.iXLen(i32 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.nxv2i64.i32.nxv2i32.i32.iXLen(i32, <vscale x 2 x i64>, <vscale x 2 x i32>, i32, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_xvw_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.nxv4i64.i32.nxv4i32.i32.iXLen(i32 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.nxv4i64.i32.nxv4i32.i32.iXLen(i32, <vscale x 4 x i64>, <vscale x 4 x i32>, i32, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_xvw_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.nxv8i64.i32.nxv8i32.i32.iXLen(i32 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.nxv8i64.i32.nxv8i32.i32.iXLen(i32, <vscale x 8 x i64>, <vscale x 8 x i32>, i32, iXLen)
+
+define void @test_sf_vc_ivw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.i32.nxv1i16.nxv1i8.i32.iXLen(i32 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.i32.nxv1i16.nxv1i8.i32.iXLen(i32, <vscale x 1 x i16>, <vscale x 1 x i8>, i32, iXLen)
+
+define void @test_sf_vc_ivw_se_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.i32.nxv2i16.nxv2i8.i32.iXLen(i32 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.i32.nxv2i16.nxv2i8.i32.iXLen(i32, <vscale x 2 x i16>, <vscale x 2 x i8>, i32, iXLen)
+
+define void @test_sf_vc_ivw_se_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.i32.nxv4i16.nxv4i8.i32.iXLen(i32 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.i32.nxv4i16.nxv4i8.i32.iXLen(i32, <vscale x 4 x i16>, <vscale x 4 x i8>, i32, iXLen)
+
+define void @test_sf_vc_ivw_se_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.i32.nxv8i16.nxv8i8.i32.iXLen(i32 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.i32.nxv8i16.nxv8i8.i32.iXLen(i32, <vscale x 8 x i16>, <vscale x 8 x i8>, i32, iXLen)
+
+define void @test_sf_vc_ivw_se_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.i32.nxv16i16.nxv16i8.i32.iXLen(i32 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.i32.nxv16i16.nxv16i8.i32.iXLen(i32, <vscale x 16 x i16>, <vscale x 16 x i8>, i32, iXLen)
+
+define void @test_sf_vc_ivw_se_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.i32.nxv32i16.nxv32i8.i32.iXLen(i32 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.i32.nxv32i16.nxv32i8.i32.iXLen(i32, <vscale x 32 x i16>, <vscale x 32 x i8>, i32, iXLen)
+
+define void @test_sf_vc_ivw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.i32.nxv1i32.nxv1i16.i32.iXLen(i32 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.i32.nxv1i32.nxv1i16.i32.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i16>, i32, iXLen)
+
+define void @test_sf_vc_ivw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.i32.nxv2i32.nxv2i16.i32.iXLen(i32 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.i32.nxv2i32.nxv2i16.i32.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i16>, i32, iXLen)
+
+define void @test_sf_vc_ivw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.i32.nxv4i32.nxv4i16.i32.iXLen(i32 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.i32.nxv4i32.nxv4i16.i32.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i16>, i32, iXLen)
+
+define void @test_sf_vc_ivw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.i32.nxv8i32.nxv8i16.i32.iXLen(i32 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.i32.nxv8i32.nxv8i16.i32.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i16>, i32, iXLen)
+
+define void @test_sf_vc_ivw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.i32.nxv16i32.nxv16i16.i32.iXLen(i32 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.i32.nxv16i32.nxv16i16.i32.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i16>, i32, iXLen)
+
+define void @test_sf_vc_ivw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.i32.nxv1i64.nxv1i32.i32.iXLen(i32 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.i32.nxv1i64.nxv1i32.i32.iXLen(i32, <vscale x 1 x i64>, <vscale x 1 x i32>, i32, iXLen)
+
+define void @test_sf_vc_ivw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.i32.nxv2i64.nxv2i32.i32.iXLen(i32 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.i32.nxv2i64.nxv2i32.i32.iXLen(i32, <vscale x 2 x i64>, <vscale x 2 x i32>, i32, iXLen)
+
+define void @test_sf_vc_ivw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.i32.nxv4i64.nxv4i32.i32.iXLen(i32 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.i32.nxv4i64.nxv4i32.i32.iXLen(i32, <vscale x 4 x i64>, <vscale x 4 x i32>, i32, iXLen)
+
+define void @test_sf_vc_ivw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.i32.nxv8i64.nxv8i32.i32.iXLen(i32 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 10, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.i32.nxv8i64.nxv8i32.i32.iXLen(i32, <vscale x 8 x i64>, <vscale x 8 x i32>, i32, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_ivw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.i32.nxv1i8.i32.iXLen(i32 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.i32.nxv1i8.i32.iXLen(i32, <vscale x 1 x i16>, <vscale x 1 x i8>, i32, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_ivw_se_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.i32.nxv2i8.i32.iXLen(i32 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.i32.nxv2i8.i32.iXLen(i32, <vscale x 2 x i16>, <vscale x 2 x i8>, i32, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_ivw_se_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv4i16.i32.nxv4i8.i32.iXLen(i32 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv4i16.i32.nxv4i8.i32.iXLen(i32, <vscale x 4 x i16>, <vscale x 4 x i8>, i32, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_ivw_se_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.i32.nxv8i8.i32.iXLen(i32 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.i32.nxv8i8.i32.iXLen(i32, <vscale x 8 x i16>, <vscale x 8 x i8>, i32, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_ivw_se_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.i32.nxv16i8.i32.iXLen(i32 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.i32.nxv16i8.i32.iXLen(i32, <vscale x 16 x i16>, <vscale x 16 x i8>, i32, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_ivw_se_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.i32.nxv32i8.i32.iXLen(i32 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.i32.nxv32i8.i32.iXLen(i32, <vscale x 32 x i16>, <vscale x 32 x i8>, i32, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_ivw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.i32.nxv1i16.i32.iXLen(i32 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.i32.nxv1i16.i32.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i16>, i32, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_ivw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.i32.nxv2i16.i32.iXLen(i32 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.i32.nxv2i16.i32.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i16>, i32, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_ivw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.i32.nxv4i16.i32.iXLen(i32 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.i32.nxv4i16.i32.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i16>, i32, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_ivw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.i32.nxv8i16.i32.iXLen(i32 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.i32.nxv8i16.i32.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i16>, i32, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_ivw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.i32.nxv16i16.i32.iXLen(i32 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.i32.nxv16i16.i32.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i16>, i32, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_ivw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.i32.nxv1i32.i32.iXLen(i32 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.i32.nxv1i32.i32.iXLen(i32, <vscale x 1 x i64>, <vscale x 1 x i32>, i32, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_ivw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.i32.nxv2i32.i32.iXLen(i32 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.i32.nxv2i32.i32.iXLen(i32, <vscale x 2 x i64>, <vscale x 2 x i32>, i32, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_ivw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.i32.nxv4i32.i32.iXLen(i32 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.i32.nxv4i32.i32.iXLen(i32, <vscale x 4 x i64>, <vscale x 4 x i32>, i32, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_ivw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.i32.nxv8i32.i32.iXLen(i32 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.i32.nxv8i32.i32.iXLen(i32, <vscale x 8 x i64>, <vscale x 8 x i32>, i32, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_ivw_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e8mf8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.nxv1i16.i32.nxv1i8.i32.iXLen(i32 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.nxv1i16.i32.nxv1i8.i32.iXLen(i32, <vscale x 1 x i16>, <vscale x 1 x i8>, i32, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_ivw_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e8mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.nxv2i16.i32.nxv2i8.i32.iXLen(i32 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.nxv2i16.i32.nxv2i8.i32.iXLen(i32, <vscale x 2 x i16>, <vscale x 2 x i8>, i32, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_ivw_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e8mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.nxv4i16.i32.nxv4i8.i32.iXLen(i32 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.nxv4i16.i32.nxv4i8.i32.iXLen(i32, <vscale x 4 x i16>, <vscale x 4 x i8>, i32, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_ivw_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e8m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.nxv8i16.i32.nxv8i8.i32.iXLen(i32 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.nxv8i16.i32.nxv8i8.i32.iXLen(i32, <vscale x 8 x i16>, <vscale x 8 x i8>, i32, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_ivw_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e8m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.nxv16i16.i32.nxv16i8.i32.iXLen(i32 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.nxv16i16.i32.nxv16i8.i32.iXLen(i32, <vscale x 16 x i16>, <vscale x 16 x i8>, i32, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_ivw_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e8m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.nxv32i16.i32.nxv32i8.i32.iXLen(i32 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.nxv32i16.i32.nxv32i8.i32.iXLen(i32, <vscale x 32 x i16>, <vscale x 32 x i8>, i32, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_ivw_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.nxv1i32.i32.nxv1i16.i32.iXLen(i32 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.nxv1i32.i32.nxv1i16.i32.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i16>, i32, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_ivw_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.nxv2i32.i32.nxv2i16.i32.iXLen(i32 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.nxv2i32.i32.nxv2i16.i32.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i16>, i32, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_ivw_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.nxv4i32.i32.nxv4i16.i32.iXLen(i32 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.nxv4i32.i32.nxv4i16.i32.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i16>, i32, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_ivw_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.nxv8i32.i32.nxv8i16.i32.iXLen(i32 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.nxv8i32.i32.nxv8i16.i32.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i16>, i32, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_ivw_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.nxv16i32.i32.nxv16i16.i32.iXLen(i32 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.nxv16i32.i32.nxv16i16.i32.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i16>, i32, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_ivw_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.nxv1i64.i32.nxv1i32.i32.iXLen(i32 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.nxv1i64.i32.nxv1i32.i32.iXLen(i32, <vscale x 1 x i64>, <vscale x 1 x i32>, i32, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_ivw_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.nxv2i64.i32.nxv2i32.i32.iXLen(i32 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.nxv2i64.i32.nxv2i32.i32.iXLen(i32, <vscale x 2 x i64>, <vscale x 2 x i32>, i32, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_ivw_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.nxv4i64.i32.nxv4i32.i32.iXLen(i32 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.nxv4i64.i32.nxv4i32.i32.iXLen(i32, <vscale x 4 x i64>, <vscale x 4 x i32>, i32, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_ivw_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v16, 10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.nxv8i64.i32.nxv8i32.i32.iXLen(i32 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 10, iXLen %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.nxv8i64.i32.nxv8i32.i32.iXLen(i32, <vscale x 8 x i64>, <vscale x 8 x i32>, i32, iXLen)
+
+define void @test_sf_vc_fvw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.i32.nxv1i32.nxv1i16.f16.iXLen(i32 1, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.i32.nxv1i32.nxv1i16.f16.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.i32.nxv2i32.nxv2i16.f16.iXLen(i32 1, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.i32.nxv2i32.nxv2i16.f16.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.i32.nxv4i32.nxv4i16.f16.iXLen(i32 1, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.i32.nxv4i32.nxv4i16.f16.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.i32.nxv8i32.nxv8i16.f16.iXLen(i32 1, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.i32.nxv8i32.nxv8i16.f16.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.i32.nxv16i32.nxv16i16.f16.iXLen(i32 1, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.i32.nxv16i32.nxv16i16.f16.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.i32.nxv1i64.nxv1i32.f32.iXLen(i32 1, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.i32.nxv1i64.nxv1i32.f32.iXLen(i32, <vscale x 1 x i64>, <vscale x 1 x i32>, float, iXLen)
+
+define void @test_sf_vc_fvw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.i32.nxv2i64.nxv2i32.f32.iXLen(i32 1, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.i32.nxv2i64.nxv2i32.f32.iXLen(i32, <vscale x 2 x i64>, <vscale x 2 x i32>, float, iXLen)
+
+define void @test_sf_vc_fvw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.i32.nxv4i64.nxv4i32.f32.iXLen(i32 1, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.i32.nxv4i64.nxv4i32.f32.iXLen(i32, <vscale x 4 x i64>, <vscale x 4 x i32>, float, iXLen)
+
+define void @test_sf_vc_fvw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.i32.nxv8i64.nxv8i32.f32.iXLen(i32 1, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.i32.nxv8i64.nxv8i32.f32.iXLen(i32, <vscale x 8 x i64>, <vscale x 8 x i32>, float, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_fvw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.i32.nxv1i16.f16.iXLen(i32 1, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.i32.nxv1i16.f16.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i16>, half, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_fvw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.i32.nxv2i16.f16.iXLen(i32 1, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.i32.nxv2i16.f16.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i16>, half, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_fvw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.i32.nxv4i16.f16.iXLen(i32 1, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.i32.nxv4i16.f16.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i16>, half, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_fvw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.i32.nxv8i16.f16.iXLen(i32 1, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.i32.nxv8i16.f16.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i16>, half, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_fvw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.i32.nxv16i16.f16.iXLen(i32 1, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.i32.nxv16i16.f16.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i16>, half, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_fvw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.i32.nxv1i32.f32.iXLen(i32 1, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.i32.nxv1i32.f32.iXLen(i32, <vscale x 1 x i64>, <vscale x 1 x i32>, float, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_fvw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv2i64.i32.nxv2i32.f32.iXLen(i32 1, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv2i64.i32.nxv2i32.f32.iXLen(i32, <vscale x 2 x i64>, <vscale x 2 x i32>, float, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_fvw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.i32.nxv4i32.f32.iXLen(i32 1, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.i32.nxv4i32.f32.iXLen(i32, <vscale x 4 x i64>, <vscale x 4 x i32>, float, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_fvw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.i32.nxv8i32.f32.iXLen(i32 1, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.i32.nxv8i32.f32.iXLen(i32, <vscale x 8 x i64>, <vscale x 8 x i32>, float, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_fvw_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.nxv1i32.i32.nxv1i16.f16.iXLen(i32 1, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.nxv1i32.i32.nxv1i16.f16.iXLen(i32, <vscale x 1 x i32>, <vscale x 1 x i16>, half, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_fvw_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.nxv2i32.i32.nxv2i16.f16.iXLen(i32 1, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.nxv2i32.i32.nxv2i16.f16.iXLen(i32, <vscale x 2 x i32>, <vscale x 2 x i16>, half, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_fvw_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.nxv4i32.i32.nxv4i16.f16.iXLen(i32 1, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.nxv4i32.i32.nxv4i16.f16.iXLen(i32, <vscale x 4 x i32>, <vscale x 4 x i16>, half, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_fvw_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.nxv8i32.i32.nxv8i16.f16.iXLen(i32 1, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.nxv8i32.i32.nxv8i16.f16.iXLen(i32, <vscale x 8 x i32>, <vscale x 8 x i16>, half, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_fvw_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.nxv16i32.i32.nxv16i16.f16.iXLen(i32 1, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
+  ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.nxv16i32.i32.nxv16i16.f16.iXLen(i32, <vscale x 16 x i32>, <vscale x 16 x i16>, half, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_fvw_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.nxv1i64.i32.nxv1i32.f32.iXLen(i32 1, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.nxv1i64.i32.nxv1i32.f32.iXLen(i32, <vscale x 1 x i64>, <vscale x 1 x i32>, float, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_fvw_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v10, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.nxv2i64.i32.nxv2i32.f32.iXLen(i32 1, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.nxv2i64.i32.nxv2i32.f32.iXLen(i32, <vscale x 2 x i64>, <vscale x 2 x i32>, float, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_fvw_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.nxv4i64.i32.nxv4i32.f32.iXLen(i32 1, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.nxv4i64.i32.nxv4i32.f32.iXLen(i32, <vscale x 4 x i64>, <vscale x 4 x i32>, float, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_fvw_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.i32.nxv8i32.f32.iXLen(i32 1, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
+  ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.i32.nxv8i32.f32.iXLen(i32, <vscale x 8 x i64>, <vscale x 8 x i32>, float, iXLen)

