[llvm] a1615b5 - [RISCV] Support LLVM IR intrinsics for xsfvcp extension.
via llvm-commits
llvm-commits at lists.llvm.org
Mon Apr 24 03:10:23 PDT 2023
Author: Nelson Chu
Date: 2023-04-24T03:10:13-07:00
New Revision: a1615b5210118e8fbc4c78c8cabb0c8d00061e6c
URL: https://github.com/llvm/llvm-project/commit/a1615b5210118e8fbc4c78c8cabb0c8d00061e6c
DIFF: https://github.com/llvm/llvm-project/commit/a1615b5210118e8fbc4c78c8cabb0c8d00061e6c.diff
LOG: [RISCV] Support LLVM IR intrinsics for xsfvcp extension.
The sf.vc.x and sf.vc.i intrinsics don't have any vector types in their outputs
or inputs, but the intrinsic spec says we still need to emit a vsetvli for
them. Therefore, we need to encode the VTYPE into these IR intrinsic names.
Differential Revision: https://reviews.llvm.org/D147934
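For example (a sketch taken from the xsfvcp-x.ll test added below), the e8mf8
suffix in the intrinsic name is what selects the SEW/LMUL of the vsetvli
emitted before the custom instruction, since no vector operand is available to
infer it from:

  declare void @llvm.riscv.sf.vc.x.se.e8mf8.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)

  ; lowers to "vsetvli zero, a1, e8, mf8, ta, ma" followed by "sf.vc.x 3, 31, 31, a0"
  tail call void @llvm.riscv.sf.vc.x.se.e8mf8.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)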
Added:
llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll
llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll
llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll
llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll
Modified:
llvm/include/llvm/IR/IntrinsicsRISCV.td
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
Removed:
################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 4791dc75b849a..243cba5c62bb2 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -1589,3 +1589,4 @@ def int_riscv_sm3p1 : ScalarCryptoGprIntrinsicAny;
// Vendor extensions
//===----------------------------------------------------------------------===//
include "llvm/IR/IntrinsicsRISCVXTHead.td"
+include "llvm/IR/IntrinsicsRISCVXsf.td"
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td b/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
new file mode 100644
index 0000000000000..67105944616c3
--- /dev/null
+++ b/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
@@ -0,0 +1,123 @@
+class VCIXSuffix<string range> {
+ list<string> suffix = !cond(!eq(range, "c"): ["e8mf8", "e8mf4", "e8mf2", "e8m1", "e8m2", "e8m4", "e8m8"],
+ !eq(range, "s"): ["e16mf4", "e16mf2", "e16m1", "e16m2", "e16m4", "e16m8"],
+ !eq(range, "i"): ["e32mf2", "e32m1", "e32m2", "e32m4", "e32m8"],
+ !eq(range, "l"): ["e64m1", "e64m2", "e64m4", "e64m8"]);
+}
+
+let TargetPrefix = "riscv" in {
+ // Output: (vector_out) or ()
+ // Input: (bit<27-26>, bit<24-20>, scalar_in, vl) or
+ // (bit<27-26>, bit<24-20>, bit<11-7>, scalar_in, vl)
+ class RISCVSFCustomVC_X<bit HasDst, bit HasSE, bit ImmScalar>
+ : Intrinsic<!if(HasDst, [llvm_anyvector_ty], []),
+ !listconcat(!if(HasDst, [llvm_anyint_ty, LLVMMatchType<1>],
+ [llvm_anyint_ty, LLVMMatchType<0>, LLVMMatchType<0>]),
+ [llvm_any_ty, llvm_anyint_ty]),
+ !listconcat([IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>], // bit<27-26> and bit<24-20>
+ !if(HasDst, [], [ImmArg<ArgIndex<2>>]), // Vd or bit<11-7>
+ !if(ImmScalar, !if(HasDst, [ImmArg<ArgIndex<2>>],
+ [ImmArg<ArgIndex<3>>]), []), // ScalarOperand
+ !if(HasSE, [IntrHasSideEffects], []))>,
+ RISCVVIntrinsic {
+ let ScalarOperand = !cond(ImmScalar: NoScalarOperand,
+ HasDst: 2,
+ true: 3);
+ let VLOperand = !if(HasDst, 3, 4);
+ }
+ // Output: (vector_out) or ()
+ // Input: (bit<27-26>, vector_in, vector_in/scalar_in, vl) or
+ // (bit<27-26>, bit<11-7>, vector_in, vector_in/scalar_in, vl)
+ class RISCVSFCustomVC_XV<bit HasDst, bit HasSE, bit ImmScalar>
+ : Intrinsic<!if(HasDst, [llvm_anyvector_ty], []),
+ !listconcat(!if(HasDst, [llvm_anyint_ty, LLVMMatchType<0>],
+ [llvm_anyint_ty, LLVMMatchType<0>, llvm_anyvector_ty]),
+ [llvm_any_ty, llvm_anyint_ty]),
+ !listconcat([IntrNoMem, ImmArg<ArgIndex<0>>], // bit<27-26>
+ !if(HasDst, [], [ImmArg<ArgIndex<1>>]), // Vd or bit<11-7>
+ !if(ImmScalar, !if(HasDst, [ImmArg<ArgIndex<2>>],
+ [ImmArg<ArgIndex<3>>]), []), // ScalarOperand
+ !if(HasSE, [IntrHasSideEffects], []))>,
+ RISCVVIntrinsic {
+ let ScalarOperand = !cond(ImmScalar: NoScalarOperand,
+ HasDst: 2,
+ true: 3);
+ let VLOperand = !if(HasDst, 3, 4);
+ }
+ // Output: (vector_out) or ()
+ // Input: (bit<27-26>, passthru, vector_in, vector_in/scalar_in, vl) or
+ // (bit<27-26>, vector_in, vector_in, vector_in/scalar_in, vl)
+ class RISCVSFCustomVC_XVV<bit HasDst, bit HasSE, bit ImmScalar>
+ : Intrinsic<!if(HasDst, [llvm_anyvector_ty], []),
+ !listconcat(!if(HasDst, [llvm_anyint_ty, LLVMMatchType<0>, LLVMMatchType<0>],
+ [llvm_anyint_ty, llvm_anyvector_ty, LLVMMatchType<1>]),
+ [llvm_any_ty, llvm_anyint_ty]),
+ !listconcat([IntrNoMem, ImmArg<ArgIndex<0>>], // bit<27-26>
+ !if(ImmScalar, [ImmArg<ArgIndex<3>>], []), // ScalarOperand
+ !if(HasSE, [IntrHasSideEffects], []))>,
+ RISCVVIntrinsic {
+ let ScalarOperand = !if(ImmScalar, NoScalarOperand, 3);
+ let VLOperand = 4;
+ }
+ // Output: (wvector_out) or ()
+ // Input: (bit<27-26>, passthru, vector_in, vector_in/scalar_in, vl) or
+ // (bit<27-26>, wvector_in, vector_in, vector_in/scalar_in, vl)
+ class RISCVSFCustomVC_XVW<bit HasDst, bit HasSE, bit ImmScalar>
+ : Intrinsic<!if(HasDst, [llvm_anyvector_ty], []),
+ !listconcat(!if(HasDst, [llvm_anyint_ty, LLVMMatchType<0>, llvm_anyvector_ty],
+ [llvm_anyint_ty, llvm_anyvector_ty, llvm_anyvector_ty]),
+ [llvm_any_ty, llvm_anyint_ty]),
+ !listconcat([IntrNoMem, ImmArg<ArgIndex<0>>], // bit<27-26>
+ !if(ImmScalar, [ImmArg<ArgIndex<3>>], []), // ScalarOperand
+ !if(HasSE, [IntrHasSideEffects], []))>,
+ RISCVVIntrinsic {
+ let ScalarOperand = !if(ImmScalar, NoScalarOperand, 3);
+ let VLOperand = 4;
+ }
+
+ multiclass RISCVSFCustomVC_X<list<string> type> {
+ foreach t = type in {
+ defvar ImmScalar = !eq(t, "i");
+ defvar range = ["c", "s", "i", "l"];
+ foreach r = range in {
+ foreach s = VCIXSuffix<r>.suffix in {
+ def "int_riscv_sf_vc_" # t # "_se_" # s : RISCVSFCustomVC_X</*HasDst*/0, /*HasSE*/1, ImmScalar>;
+ }
+ }
+ def "int_riscv_sf_vc_v_" # t # "_se" : RISCVSFCustomVC_X</*HasDst*/1, /*HasSE*/1, ImmScalar>;
+ def "int_riscv_sf_vc_v_" # t : RISCVSFCustomVC_X</*HasDst*/1, /*HasSE*/0, ImmScalar>;
+ }
+ }
+
+ multiclass RISCVSFCustomVC_XV<list<string> type> {
+ foreach t = type in {
+ defvar ImmScalar = !eq(t, "i");
+ def "int_riscv_sf_vc_" # t # "v_se" : RISCVSFCustomVC_XV</*HasDst*/0, /*HasSE*/1, ImmScalar>;
+ def "int_riscv_sf_vc_v_" # t # "v_se" : RISCVSFCustomVC_XV</*HasDst*/1, /*HasSE*/1, ImmScalar>;
+ def "int_riscv_sf_vc_v_" # t # "v" : RISCVSFCustomVC_XV</*HasDst*/1, /*HasSE*/0, ImmScalar>;
+ }
+ }
+
+ multiclass RISCVSFCustomVC_XVV<list<string> type> {
+ foreach t = type in {
+ defvar ImmScalar = !eq(t, "i");
+ def "int_riscv_sf_vc_" # t # "vv_se" : RISCVSFCustomVC_XVV</*HasDst*/0, /*HasSE*/1, ImmScalar>;
+ def "int_riscv_sf_vc_v_" # t # "vv_se" : RISCVSFCustomVC_XVV</*HasDst*/1, /*HasSE*/1, ImmScalar>;
+ def "int_riscv_sf_vc_v_" # t # "vv" : RISCVSFCustomVC_XVV</*HasDst*/1, /*HasSE*/0, ImmScalar>;
+ }
+ }
+
+ multiclass RISCVSFCustomVC_XVW<list<string> type> {
+ foreach t = type in {
+ defvar ImmScalar = !eq(t, "i");
+ def "int_riscv_sf_vc_" # t # "vw_se" : RISCVSFCustomVC_XVW</*HasDst*/0, /*HasSE*/1, ImmScalar>;
+ def "int_riscv_sf_vc_v_" # t # "vw_se" : RISCVSFCustomVC_XVW</*HasDst*/1, /*HasSE*/1, ImmScalar>;
+ def "int_riscv_sf_vc_v_" # t # "vw" : RISCVSFCustomVC_XVW</*HasDst*/1, /*HasSE*/0, ImmScalar>;
+ }
+ }
+
+ defm "" : RISCVSFCustomVC_X<["x", "i"]>;
+ defm "" : RISCVSFCustomVC_XV<["x", "i", "v", "f"]>;
+ defm "" : RISCVSFCustomVC_XVV<["x", "i", "v", "f"]>;
+ defm "" : RISCVSFCustomVC_XVW<["x", "i", "v", "f"]>;
+} // TargetPrefix = "riscv"
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index c03f3dbe96121..d23d4962eb833 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -510,10 +510,12 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
// RVV intrinsics may have illegal operands.
// We also need to custom legalize vmv.x.s.
- setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN},
+ setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN,
+ ISD::INTRINSIC_VOID},
{MVT::i8, MVT::i16}, Custom);
if (Subtarget.is64Bit())
- setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
+ setOperationAction({ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_VOID},
+ MVT::i32, Custom);
else
setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN},
MVT::i64, Custom);
@@ -5999,15 +6001,18 @@ SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
// promoted or expanded.
static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
const RISCVSubtarget &Subtarget) {
- assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
+ assert((Op.getOpcode() == ISD::INTRINSIC_VOID ||
+ Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
"Unexpected opcode");
if (!Subtarget.hasVInstructions())
return SDValue();
- bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
+ bool HasChain = Op.getOpcode() == ISD::INTRINSIC_VOID ||
+ Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
+
SDLoc DL(Op);
const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
@@ -6476,7 +6481,7 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
}
}
- return SDValue();
+ return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
}
static unsigned getRVVReductionOp(unsigned ISDOpcode) {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index c84a58b0ce380..b6a683994634d 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -78,7 +78,8 @@ class PseudoToVInst<string PseudoInst> {
["_M1", ""],
["_M2", ""],
["_M4", ""],
- ["_M8", ""]
+ ["_M8", ""],
+ ["_SE", ""]
];
string VInst = !foldl(PseudoInst, AffixSubsts, Acc, AffixSubst,
!subst(AffixSubst[0], AffixSubst[1], Acc));
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
index dd68318716211..52f2826824223 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
@@ -27,6 +27,41 @@ def VCIX_XV : VCIXType<0b0010>;
def VCIX_XVV : VCIXType<0b1010>;
def VCIX_XVW : VCIXType<0b1111>;
+// The payload and timm5 operands are all marked as ImmArg in the IR
+// intrinsic and will be target constant, so use TImmLeaf rather than ImmLeaf.
+def payload1 : Operand<XLenVT>, TImmLeaf<XLenVT, [{return isUInt<1>(Imm);}]> {
+ let ParserMatchClass = UImmAsmOperand<1>;
+ let DecoderMethod = "decodeUImmOperand<1>";
+ let OperandType = "OPERAND_UIMM1";
+ let OperandNamespace = "RISCVOp";
+}
+
+def payload2 : Operand<XLenVT>, TImmLeaf<XLenVT, [{return isUInt<2>(Imm);}]> {
+ let ParserMatchClass = UImmAsmOperand<2>;
+ let DecoderMethod = "decodeUImmOperand<2>";
+ let OperandType = "OPERAND_UIMM2";
+ let OperandNamespace = "RISCVOp";
+}
+
+def payload5 : Operand<XLenVT>, TImmLeaf<XLenVT, [{return isUInt<5>(Imm);}]> {
+ let ParserMatchClass = UImmAsmOperand<5>;
+ let DecoderMethod = "decodeUImmOperand<5>";
+ let OperandType = "OPERAND_UIMM5";
+ let OperandNamespace = "RISCVOp";
+}
+
+def timm5 : Operand<XLenVT>, TImmLeaf<XLenVT, [{return isInt<5>(Imm);}]> {
+ let ParserMatchClass = SImmAsmOperand<5>;
+ let EncoderMethod = "getImmOpValue";
+ let DecoderMethod = "decodeSImmOperand<5>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (MCOp.evaluateAsConstantImm(Imm))
+ return isInt<5>(Imm);
+ return MCOp.isBareSymbolRef();
+ }];
+}
+
class SwapVCIXIns<dag funct6, dag rd, dag rs2, dag rs1, bit swap> {
dag Ins = !con(funct6, !if(swap, rs2, rd), !if(swap, rd, rs2), rs1);
}
@@ -76,6 +111,15 @@ class RVInstVCFCustom2<bits<4> funct6_hi4, bits<3> funct3, dag outs, dag ins,
let RVVConstraint = NoConstraint;
}
+class GetFTypeInfo<int sew> {
+ ValueType Scalar = !cond(!eq(sew, 16): f16,
+ !eq(sew, 32): f32,
+ !eq(sew, 64): f64);
+ RegisterClass ScalarRegClass = !cond(!eq(sew, 16): FPR16,
+ !eq(sew, 32): FPR32,
+ !eq(sew, 64): FPR64);
+}
+
class VCIXInfo<string suffix, VCIXType type, DAGOperand TyRd,
DAGOperand TyRs2, DAGOperand TyRs1, bit HaveOutputDst> {
string OpcodeStr = !if(HaveOutputDst, "sf.vc.v." # suffix,
@@ -160,3 +204,303 @@ let Predicates = [HasVendorXSfvcp], mayLoad = 0, mayStore = 0,
defm VVW : CustomSiFiveVCIX<"vvw", VCIX_XVW, VR, VR, VR>, Sched<[]>;
defm FVW : CustomSiFiveVCIX<"fvw", VCIX_XVW, VR, VR, FPR32>, Sched<[]>;
}
+
+class VPseudoVC_X<Operand OpClass, DAGOperand RS1Class,
+ bit HasSideEffect = 1> :
+ Pseudo<(outs),
+ (ins OpClass:$op1, payload5:$rs2, payload5:$rd, RS1Class:$r1,
+ AVL:$vl, ixlenimm:$sew), []>,
+ RISCVVPseudo {
+ let mayLoad = 0;
+ let mayStore = 0;
+ let HasVLOp = 1;
+ let HasSEWOp = 1;
+ let hasSideEffects = HasSideEffect;
+ let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoVC_XV<Operand OpClass, VReg RS2Class, DAGOperand RS1Class,
+ bit HasSideEffect = 1> :
+ Pseudo<(outs),
+ (ins OpClass:$op1, payload5:$rd, RS2Class:$rs2, RS1Class:$r1,
+ AVL:$vl, ixlenimm:$sew), []>,
+ RISCVVPseudo {
+ let mayLoad = 0;
+ let mayStore = 0;
+ let HasVLOp = 1;
+ let HasSEWOp = 1;
+ let hasSideEffects = HasSideEffect;
+ let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoVC_XVV<Operand OpClass, VReg RDClass, VReg RS2Class,
+ DAGOperand RS1Class, bit HasSideEffect = 1> :
+ Pseudo<(outs),
+ (ins OpClass:$op1, RDClass:$rd, RS2Class:$rs2, RS1Class:$r1,
+ AVL:$vl, ixlenimm:$sew), []>,
+ RISCVVPseudo {
+ let mayLoad = 0;
+ let mayStore = 0;
+ let HasVLOp = 1;
+ let HasSEWOp = 1;
+ let hasSideEffects = HasSideEffect;
+ let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoVC_V_X<Operand OpClass, VReg RDClass, DAGOperand RS1Class,
+ bit HasSideEffect = 1> :
+ Pseudo<(outs RDClass:$rd),
+ (ins OpClass:$op1, payload5:$rs2, RS1Class:$r1,
+ AVL:$vl, ixlenimm:$sew), []>,
+ RISCVVPseudo {
+ let mayLoad = 0;
+ let mayStore = 0;
+ let HasVLOp = 1;
+ let HasSEWOp = 1;
+ let hasSideEffects = HasSideEffect;
+ let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoVC_V_XV<Operand OpClass, VReg RDClass, VReg RS2Class,
+ DAGOperand RS1Class, bit HasSideEffect = 1> :
+ Pseudo<(outs RDClass:$rd),
+ (ins OpClass:$op1, RS2Class:$rs2, RS1Class:$r1,
+ AVL:$vl, ixlenimm:$sew), []>,
+ RISCVVPseudo {
+ let mayLoad = 0;
+ let mayStore = 0;
+ let HasVLOp = 1;
+ let HasSEWOp = 1;
+ let hasSideEffects = HasSideEffect;
+ let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoVC_V_XVV<Operand OpClass, VReg RDClass, VReg RS2Class,
+ DAGOperand RS1Class, bit HasSideEffect = 1> :
+ Pseudo<(outs RDClass:$rd),
+ (ins OpClass:$op1, RDClass:$rs3, RS2Class:$rs2, RS1Class:$r1,
+ AVL:$vl, ixlenimm:$sew), []>,
+ RISCVVPseudo {
+ let mayLoad = 0;
+ let mayStore = 0;
+ let HasVLOp = 1;
+ let HasSEWOp = 1;
+ let hasSideEffects = HasSideEffect;
+ let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+multiclass VPseudoVC_X<LMULInfo m, DAGOperand RS1Class,
+ Operand OpClass = payload2> {
+ let VLMul = m.value in {
+ def "PseudoVC_" # NAME # "_SE_" # m.MX : VPseudoVC_X<OpClass, RS1Class>;
+ def "PseudoVC_V_" # NAME # "_SE_" # m.MX : VPseudoVC_V_X<OpClass, m.vrclass, RS1Class>;
+ def "PseudoVC_V_" # NAME # "_" # m.MX : VPseudoVC_V_X<OpClass, m.vrclass, RS1Class, 0>;
+ }
+}
+
+multiclass VPseudoVC_XV<LMULInfo m, DAGOperand RS1Class,
+ Operand OpClass = payload2> {
+ let VLMul = m.value in {
+ def "PseudoVC_" # NAME # "_SE_" # m.MX : VPseudoVC_XV<OpClass, m.vrclass, RS1Class>;
+ def "PseudoVC_V_" # NAME # "_SE_" # m.MX : VPseudoVC_V_XV<OpClass, m.vrclass, m.vrclass, RS1Class>;
+ def "PseudoVC_V_" # NAME # "_" # m.MX : VPseudoVC_V_XV<OpClass, m.vrclass, m.vrclass, RS1Class, 0>;
+ }
+}
+
+multiclass VPseudoVC_XVV<LMULInfo m, DAGOperand RS1Class,
+ Operand OpClass = payload2> {
+ let VLMul = m.value in {
+ def "PseudoVC_" # NAME # "_SE_" # m.MX : VPseudoVC_XVV<OpClass, m.vrclass, m.vrclass, RS1Class>;
+ def "PseudoVC_V_" # NAME # "_SE_" # m.MX : VPseudoVC_V_XVV<OpClass, m.vrclass, m.vrclass, RS1Class>;
+ def "PseudoVC_V_" # NAME # "_" # m.MX : VPseudoVC_V_XVV<OpClass, m.vrclass, m.vrclass, RS1Class, 0>;
+ }
+}
+
+multiclass VPseudoVC_XVW<LMULInfo m, DAGOperand RS1Class,
+ Operand OpClass = payload2> {
+ let VLMul = m.value in {
+ def "PseudoVC_" # NAME # "_SE_" # m.MX : VPseudoVC_XVV<OpClass, m.wvrclass, m.vrclass, RS1Class>;
+ let Constraints = "@earlyclobber $rd, $rd = $rs3" in {
+ def "PseudoVC_V_" # NAME # "_SE_" # m.MX : VPseudoVC_V_XVV<OpClass, m.wvrclass, m.vrclass, RS1Class>;
+ def "PseudoVC_V_" # NAME # "_" # m.MX : VPseudoVC_V_XVV<OpClass, m.wvrclass, m.vrclass, RS1Class, 0>;
+ }
+ }
+}
+
+let Predicates = [HasVendorXSfvcp] in {
+ foreach m = MxList in {
+ defm X : VPseudoVC_X<m, GPR>;
+ defm I : VPseudoVC_X<m, timm5>;
+ defm XV : VPseudoVC_XV<m, GPR>;
+ defm IV : VPseudoVC_XV<m, timm5>;
+ defm VV : VPseudoVC_XV<m, m.vrclass>;
+ defm XVV : VPseudoVC_XVV<m, GPR>;
+ defm IVV : VPseudoVC_XVV<m, timm5>;
+ defm VVV : VPseudoVC_XVV<m, m.vrclass>;
+ }
+ foreach f = FPList in {
+ foreach m = f.MxList in {
+ defm f.FX # "V" : VPseudoVC_XV<m, f.fprclass, payload1>;
+ defm f.FX # "VV" : VPseudoVC_XVV<m, f.fprclass, payload1>;
+ }
+ }
+ foreach m = MxListW in {
+ defm XVW : VPseudoVC_XVW<m, GPR>;
+ defm IVW : VPseudoVC_XVW<m, timm5>;
+ defm VVW : VPseudoVC_XVW<m, m.vrclass>;
+ }
+ foreach f = FPListW in {
+ foreach m = f.MxList in
+ defm f.FX # "VW" : VPseudoVC_XVW<m, f.fprclass, payload1>;
+ }
+}
+
+class VPatVC_OP4<string intrinsic_name,
+ string inst,
+ ValueType op2_type,
+ ValueType op3_type,
+ ValueType op4_type,
+ int sew,
+ DAGOperand op2_kind,
+ DAGOperand op3_kind,
+ DAGOperand op4_kind,
+ Operand op1_kind = payload2> :
+ Pat<(!cast<Intrinsic>(intrinsic_name)
+ (XLenVT op1_kind:$op1),
+ (op2_type op2_kind:$op2),
+ (op3_type op3_kind:$op3),
+ (op4_type op4_kind:$op4),
+ VLOpFrag),
+ (!cast<Instruction>(inst)
+ (XLenVT op1_kind:$op1),
+ (op2_type op2_kind:$op2),
+ (op3_type op3_kind:$op3),
+ (op4_type op4_kind:$op4),
+ GPR:$vl, sew)>;
+
+class VPatVC_V_OP4<string intrinsic_name,
+ string inst,
+ ValueType result_type,
+ ValueType op2_type,
+ ValueType op3_type,
+ ValueType op4_type,
+ int sew,
+ DAGOperand op2_kind,
+ DAGOperand op3_kind,
+ DAGOperand op4_kind,
+ Operand op1_kind = payload2> :
+ Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
+ (XLenVT op1_kind:$op1),
+ (op2_type op2_kind:$op2),
+ (op3_type op3_kind:$op3),
+ (op4_type op4_kind:$op4),
+ VLOpFrag)),
+ (!cast<Instruction>(inst)
+ (XLenVT op1_kind:$op1),
+ (op2_type op2_kind:$op2),
+ (op3_type op3_kind:$op3),
+ (op4_type op4_kind:$op4),
+ GPR:$vl, sew)>;
+
+class VPatVC_V_OP3<string intrinsic_name,
+ string inst,
+ ValueType result_type,
+ ValueType op2_type,
+ ValueType op3_type,
+ int sew,
+ DAGOperand op2_kind,
+ DAGOperand op3_kind,
+ Operand op1_kind = payload2> :
+ Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
+ (XLenVT op1_kind:$op1),
+ (op2_type op2_kind:$op2),
+ (op3_type op3_kind:$op3),
+ VLOpFrag)),
+ (!cast<Instruction>(inst)
+ (XLenVT op1_kind:$op1),
+ (op2_type op2_kind:$op2),
+ (op3_type op3_kind:$op3),
+ GPR:$vl, sew)>;
+
+multiclass VPatVC_X<string intrinsic_suffix, string instruction_suffix,
+ VTypeInfo vti, ValueType type, DAGOperand kind> {
+ def : VPatVC_OP4<"int_riscv_sf_vc_" # intrinsic_suffix # "_se_e" # vti.SEW # !tolower(vti.LMul.MX),
+ "PseudoVC_" # instruction_suffix # "_SE_" # vti.LMul.MX,
+ XLenVT, XLenVT, type, vti.Log2SEW,
+ payload5, payload5, kind>;
+ def : VPatVC_V_OP3<"int_riscv_sf_vc_v_" # intrinsic_suffix # "_se",
+ "PseudoVC_V_" # instruction_suffix # "_SE_" # vti.LMul.MX,
+ vti.Vector, XLenVT, type, vti.Log2SEW,
+ payload5, kind>;
+ def : VPatVC_V_OP3<"int_riscv_sf_vc_v_" # intrinsic_suffix,
+ "PseudoVC_V_" # instruction_suffix # "_" # vti.LMul.MX,
+ vti.Vector, XLenVT, type, vti.Log2SEW,
+ payload5, kind>;
+}
+
+multiclass VPatVC_XV<string intrinsic_suffix, string instruction_suffix,
+ VTypeInfo vti, ValueType type, DAGOperand kind,
+ Operand op1_kind = payload2> {
+ def : VPatVC_OP4<"int_riscv_sf_vc_" # intrinsic_suffix # "_se",
+ "PseudoVC_" # instruction_suffix # "_SE_" # vti.LMul.MX,
+ XLenVT, vti.Vector, type, vti.Log2SEW,
+ payload5, vti.RegClass, kind, op1_kind>;
+ def : VPatVC_V_OP3<"int_riscv_sf_vc_v_" # intrinsic_suffix # "_se",
+ "PseudoVC_V_" # instruction_suffix # "_SE_" # vti.LMul.MX,
+ vti.Vector, vti.Vector, type, vti.Log2SEW,
+ vti.RegClass, kind, op1_kind>;
+ def : VPatVC_V_OP3<"int_riscv_sf_vc_v_" # intrinsic_suffix,
+ "PseudoVC_V_" # instruction_suffix # "_" # vti.LMul.MX,
+ vti.Vector, vti.Vector, type, vti.Log2SEW,
+ vti.RegClass, kind, op1_kind>;
+}
+
+multiclass VPatVC_XVV<string intrinsic_suffix, string instruction_suffix,
+ VTypeInfo wti, VTypeInfo vti, ValueType type, DAGOperand kind,
+ Operand op1_kind = payload2> {
+ def : VPatVC_OP4<"int_riscv_sf_vc_" # intrinsic_suffix # "_se",
+ "PseudoVC_" # instruction_suffix # "_SE_" # vti.LMul.MX,
+ wti.Vector, vti.Vector, type, vti.Log2SEW,
+ wti.RegClass, vti.RegClass, kind, op1_kind>;
+ def : VPatVC_V_OP4<"int_riscv_sf_vc_v_" # intrinsic_suffix # "_se",
+ "PseudoVC_V_" # instruction_suffix # "_SE_" # vti.LMul.MX,
+ wti.Vector, wti.Vector, vti.Vector, type, vti.Log2SEW,
+ wti.RegClass, vti.RegClass, kind, op1_kind>;
+ def : VPatVC_V_OP4<"int_riscv_sf_vc_v_" # intrinsic_suffix,
+ "PseudoVC_V_" # instruction_suffix # "_" # vti.LMul.MX,
+ wti.Vector, wti.Vector, vti.Vector, type, vti.Log2SEW,
+ wti.RegClass, vti.RegClass, kind, op1_kind>;
+}
+
+let Predicates = [HasVendorXSfvcp] in {
+ foreach vti = AllIntegerVectors in {
+ defm : VPatVC_X<"x", "X", vti, vti.Scalar, vti.ScalarRegClass>;
+ defm : VPatVC_X<"i", "I", vti, XLenVT, timm5>;
+ defm : VPatVC_XV<"xv", "XV", vti, vti.Scalar, vti.ScalarRegClass>;
+ defm : VPatVC_XV<"iv", "IV", vti, XLenVT, timm5>;
+ defm : VPatVC_XV<"vv", "VV", vti, vti.Vector, vti.RegClass>;
+ defm : VPatVC_XVV<"xvv", "XVV", vti, vti, vti.Scalar, vti.ScalarRegClass>;
+ defm : VPatVC_XVV<"ivv", "IVV", vti, vti, XLenVT, timm5>;
+ defm : VPatVC_XVV<"vvv", "VVV", vti, vti, vti.Vector, vti.RegClass>;
+ if !ge(vti.SEW, 16) then {
+ defm : VPatVC_XV<"fv", "F" # vti.SEW # "V", vti,
+ GetFTypeInfo<vti.SEW>.Scalar,
+ GetFTypeInfo<vti.SEW>.ScalarRegClass, payload1>;
+ defm : VPatVC_XVV<"fvv", "F" # vti.SEW # "VV", vti, vti,
+ GetFTypeInfo<vti.SEW>.Scalar,
+ GetFTypeInfo<vti.SEW>.ScalarRegClass, payload1>;
+ }
+ }
+ foreach VtiToWti = AllWidenableIntVectors in {
+ defvar vti = VtiToWti.Vti;
+ defvar wti = VtiToWti.Wti;
+ defm : VPatVC_XVV<"xvw", "XVW", wti, vti, vti.Scalar, vti.ScalarRegClass>;
+ defm : VPatVC_XVV<"ivw", "IVW", wti, vti, XLenVT, timm5>;
+ defm : VPatVC_XVV<"vvw", "VVW", wti, vti, vti.Vector, vti.RegClass>;
+ if !ge(vti.SEW, 16) then {
+ defm : VPatVC_XVV<"fvw", "F" # vti.SEW # "VW", wti, vti,
+ GetFTypeInfo<vti.SEW>.Scalar,
+ GetFTypeInfo<vti.SEW>.ScalarRegClass, payload1>;
+ }
+ }
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll
new file mode 100644
index 0000000000000..250a8bd1df962
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll
@@ -0,0 +1,1565 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+xsfvcp \
+; RUN: -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvcp \
+; RUN: -verify-machineinstrs | FileCheck %s
+
+define void @test_sf_vc_x_se_e8mf8(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e8mf8.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e8mf8.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+
+define void @test_sf_vc_x_se_e8mf4(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e8mf4.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e8mf4.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+
+define void @test_sf_vc_x_se_e8mf2(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e8mf2.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e8mf2.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+
+define void @test_sf_vc_x_se_e8m1(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e8m1.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e8m1.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+
+define void @test_sf_vc_x_se_e8m2(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e8m2.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e8m2.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+
+define void @test_sf_vc_x_se_e8m4(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e8m4.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e8m4.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+
+define void @test_sf_vc_x_se_e8m8(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e8m8.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e8m8.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+
+define void @test_sf_vc_x_se_e16mf4(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e16mf4.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e16mf4.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen)
+
+define void @test_sf_vc_x_se_e16mf2(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e16mf2.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e16mf2.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen)
+
+define void @test_sf_vc_x_se_e16m1(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e16m1.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e16m1.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen)
+
+define void @test_sf_vc_x_se_e16m2(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e16m2.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e16m2.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen)
+
+define void @test_sf_vc_x_se_e16m4(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e16m4.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e16m4.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen)
+
+define void @test_sf_vc_x_se_e16m8(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e16m8.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e16m8.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen)
+
+define void @test_sf_vc_x_se_e32mf2(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e32mf2.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e32mf2.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen)
+
+define void @test_sf_vc_x_se_e32m1(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e32m1.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e32m1.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen)
+
+define void @test_sf_vc_x_se_e32m2(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e32m2.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e32m2.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen)
+
+define void @test_sf_vc_x_se_e32m4(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e32m4.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e32m4.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen)
+
+define void @test_sf_vc_x_se_e32m8(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e32m8.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e32m8.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_x_se_e8mf8(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.x.se.nxv1i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.x.se.nxv1i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_x_se_e8mf4(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.x.se.nxv2i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.x.se.nxv2i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_x_se_e8mf2(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.x.se.nxv4i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.x.se.nxv4i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_x_se_e8m1(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.x.se.nxv8i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.x.se.nxv8i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_x_se_e8m2(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.x.se.nxv16i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.x.se.nxv16i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_x_se_e8m4(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.x.se.nxv32i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.x.se.nxv32i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_x_se_e8m8(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.x.se.nxv64i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.x.se.nxv64i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_x_se_e16mf4(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.x.se.nxv1i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.x.se.nxv1i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_x_se_e16mf2(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.x.se.nxv2i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.x.se.nxv2i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_x_se_e16m1(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.x.se.nxv4i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.x.se.nxv4i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_x_se_e16m2(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.x.se.nxv8i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.x.se.nxv8i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_x_se_e16m4(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.x.se.nxv16i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.x.se.nxv16i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_x_se_e16m8(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.x.se.nxv32i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.x.se.nxv32i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_x_se_e32mf2(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.x.se.nxv1i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.x.se.nxv1i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_x_se_e32m1(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.x.se.nxv2i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.x.se.nxv2i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_x_se_e32m2(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.x.se.nxv4i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.x.se.nxv4i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_x_se_e32m4(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.x.se.nxv8i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.x.se.nxv8i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_x_se_e32m8(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.x.se.nxv16i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.x.se.nxv16i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_x_e8mf8(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.x.nxv1i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.x.nxv1i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_x_e8mf4(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.x.nxv2i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.x.nxv2i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_x_e8mf2(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.x.nxv4i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.x.nxv4i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_x_e8m1(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.x.nxv8i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.x.nxv8i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_x_e8m2(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.x.nxv16i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.x.nxv16i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_x_e8m4(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.x.nxv32i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.x.nxv32i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_x_e8m8(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.x.nxv64i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.x.nxv64i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_x_e16mf4(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.x.nxv1i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.x.nxv1i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_x_e16mf2(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.x.nxv2i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.x.nxv2i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_x_e16m1(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.x.nxv4i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.x.nxv4i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_x_e16m2(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.x.nxv8i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.x.nxv8i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_x_e16m4(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.x.nxv16i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.x.nxv16i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_x_e16m8(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.x.nxv32i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.x.nxv32i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_x_e32mf2(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.x.nxv1i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.x.nxv1i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_x_e32m1(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.x.nxv2i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.x.nxv2i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_x_e32m2(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.x.nxv4i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.x.nxv4i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_x_e32m4(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.x.nxv8i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.x.nxv8i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_x_e32m8(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.x.nxv16i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.x.nxv16i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define void @test_sf_vc_i_se_e8mf8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e8mf8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e8mf8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e8mf4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e8mf4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e8mf4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e8mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e8mf2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e8mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e8m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e8m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e8m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e8m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e8m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e8m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e8m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e8m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e8m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e8m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e8m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e8m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e16mf4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e16mf4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e16mf4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e16mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e16mf2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e16mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e16m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e16m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e16m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e16m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e16m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e16m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e16m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e16m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e16m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e16m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e16m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e16m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e32mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e32mf2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e32mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e32m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e32m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e32m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e32m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e32m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e32m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e32m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e32m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e32m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e32m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e32m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e32m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e64m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e64m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e64m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e64m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e64m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e64m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e64m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e64m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e64m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e64m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e64m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e64m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_i_se_e8mf8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.i.se.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.i.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_i_se_e8mf4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.i.se.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.i.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_i_se_e8mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.i.se.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.i.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_i_se_e8m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.i.se.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.i.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_i_se_e8m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.i.se.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.i.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_i_se_e8m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.i.se.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.i.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_i_se_e8m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.i.se.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.i.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_i_se_e16mf4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.i.se.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.i.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_i_se_e16mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.i.se.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.i.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_i_se_e16m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.i.se.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.i.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_i_se_e16m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.i.se.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.i.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_i_se_e16m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.i.se.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.i.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_i_se_e16m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.i.se.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.i.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_i_se_e32mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.i.se.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.i.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_i_se_e32m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.i.se.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.i.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_i_se_e32m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.i.se.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.i.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_i_se_e32m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.i.se.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.i.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_i_se_e32m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.i.se.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.i.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_i_se_e64m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.i.se.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.i.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_i_se_e64m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.i.se.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.i.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_i_se_e64m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.i.se.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.i.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_i_se_e64m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.i.se.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.i.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_i_e8mf8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.i.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.i.nxv1i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_i_e8mf4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.i.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.i.nxv2i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_i_e8mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.i.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.i.nxv4i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_i_e8m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.i.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.i.nxv8i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_i_e8m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.i.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.i.nxv16i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_i_e8m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.i.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.i.nxv32i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_i_e8m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.i.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.i.nxv64i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_i_e16mf4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.i.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.i.nxv1i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_i_e16mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.i.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.i.nxv2i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_i_e16m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.i.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.i.nxv4i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_i_e16m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.i.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.i.nxv8i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_i_e16m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.i.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.i.nxv16i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_i_e16m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.i.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.i.nxv32i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_i_e32mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.i.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.i.nxv1i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_i_e32m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.i.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.i.nxv2i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_i_e32m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.i.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.i.nxv4i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_i_e32m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.i.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.i.nxv8i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_i_e32m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.i.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.i.nxv16i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_i_e64m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.i.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.i.nxv1i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_i_e64m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.i.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.i.nxv2i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_i_e64m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.i.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.i.nxv4i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_i_e64m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.i.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.i.nxv8i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll
new file mode 100644
index 0000000000000..d4063180c63e2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll
@@ -0,0 +1,3008 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+xsfvcp \
+; RUN: -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+xsfvcp \
+; RUN: -verify-machineinstrs | FileCheck %s
+
+define void @test_sf_vc_vv_se_e8mf8(<vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define void @test_sf_vc_vv_se_e8mf4(<vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen 3, iXLen 31, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define void @test_sf_vc_vv_se_e8mf2(<vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen 3, iXLen 31, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define void @test_sf_vc_vv_se_e8m1(<vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen 3, iXLen 31, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define void @test_sf_vc_vv_se_e8m2(<vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen 3, iXLen 31, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define void @test_sf_vc_vv_se_e8m4(<vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen 3, iXLen 31, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define void @test_sf_vc_vv_se_e8m8(<vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen 3, iXLen 31, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen, iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
+
+define void @test_sf_vc_vv_se_e16mf4(<vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define void @test_sf_vc_vv_se_e16mf2(<vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen 3, iXLen 31, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define void @test_sf_vc_vv_se_e16m1(<vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen 3, iXLen 31, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define void @test_sf_vc_vv_se_e16m2(<vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen 3, iXLen 31, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define void @test_sf_vc_vv_se_e16m4(<vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen 3, iXLen 31, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define void @test_sf_vc_vv_se_e16m8(<vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen 3, iXLen 31, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen, iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
+
+define void @test_sf_vc_vv_se_e32mf2(<vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
+
+define void @test_sf_vc_vv_se_e32m1(<vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen 3, iXLen 31, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
+
+define void @test_sf_vc_vv_se_e32m2(<vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen 3, iXLen 31, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
+
+define void @test_sf_vc_vv_se_e32m4(<vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen 3, iXLen 31, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
+
+define void @test_sf_vc_vv_se_e32m8(<vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen 3, iXLen 31, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen, iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
+
+define void @test_sf_vc_vv_se_e64m1(<vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen, iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
+
+define void @test_sf_vc_vv_se_e64m2(<vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen 3, iXLen 31, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen, iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
+
+define void @test_sf_vc_vv_se_e64m4(<vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen 3, iXLen 31, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen, iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
+
+define void @test_sf_vc_vv_se_e64m8(<vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen 3, iXLen 31, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen, iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_vv_se_e8mf8(<vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen 3, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
+ ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_vv_se_e8mf4(<vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen 3, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
+ ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_vv_se_e8mf2(<vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen 3, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
+ ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_vv_se_e8m1(<vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen 3, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
+ ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_vv_se_e8m2(<vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen 3, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
+ ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_vv_se_e8m4(<vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen 3, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
+ ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_vv_se_e8m8(<vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen 3, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl)
+ ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_vv_se_e16mf4(<vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen 3, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_vv_se_e16mf2(<vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen 3, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_vv_se_e16m1(<vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen 3, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_vv_se_e16m2(<vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen 3, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_vv_se_e16m4(<vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen 3, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_vv_se_e16m8(<vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen 3, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_vv_se_e32mf2(<vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen 3, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_vv_se_e32m1(<vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen 3, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_vv_se_e32m2(<vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen 3, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_vv_se_e32m4(<vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen 3, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_vv_se_e32m8(<vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen 3, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_vv_se_e64m1(<vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen 3, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
+ ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_vv_se_e64m2(<vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen 3, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
+ ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_vv_se_e64m4(<vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen 3, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
+ ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_vv_se_e64m8(<vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen 3, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
+ ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_vv_e8mf8(<vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen 3, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
+ ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_vv_e8mf4(<vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen 3, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
+ ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_vv_e8mf2(<vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen 3, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
+ ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_vv_e8m1(<vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen 3, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
+ ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_vv_e8m2(<vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen 3, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
+ ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_vv_e8m4(<vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen 3, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
+ ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_vv_e8m8(<vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen 3, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl)
+ ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_vv_e16mf4(<vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen 3, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_vv_e16mf2(<vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen 3, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_vv_e16m1(<vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen 3, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_vv_e16m2(<vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen 3, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_vv_e16m4(<vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen 3, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_vv_e16m8(<vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen 3, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_vv_e32mf2(<vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen 3, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_vv_e32m1(<vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen 3, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_vv_e32m2(<vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen 3, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_vv_e32m4(<vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen 3, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_vv_e32m8(<vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen 3, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_vv_e64m1(<vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen 3, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
+ ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_vv_e64m2(<vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen 3, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
+ ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_vv_e64m4(<vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen 3, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
+ ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_vv_e64m8(<vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen 3, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
+ ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
+
+define void @test_sf_vc_xv_se_e8mf8(<vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i8.i8.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i8.i8.iXLen(iXLen, iXLen, <vscale x 1 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e8mf4(<vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i8.i8.iXLen(iXLen 3, iXLen 31, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i8.i8.iXLen(iXLen, iXLen, <vscale x 2 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e8mf2(<vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i8.i8.iXLen(iXLen 3, iXLen 31, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i8.i8.iXLen(iXLen, iXLen, <vscale x 4 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e8m1(<vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i8.i8.iXLen(iXLen 3, iXLen 31, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i8.i8.iXLen(iXLen, iXLen, <vscale x 8 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e8m2(<vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i8.i8.iXLen(iXLen 3, iXLen 31, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i8.i8.iXLen(iXLen, iXLen, <vscale x 16 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e8m4(<vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32i8.i8.iXLen(iXLen 3, iXLen 31, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32i8.i8.iXLen(iXLen, iXLen, <vscale x 32 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e8m8(<vscale x 64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv64i8.i8.iXLen(iXLen 3, iXLen 31, <vscale x 64 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv64i8.i8.iXLen(iXLen, iXLen, <vscale x 64 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e16mf4(<vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i16.i16.iXLen(iXLen, iXLen, <vscale x 1 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xv_se_e16mf2(<vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i16.i16.iXLen(iXLen, iXLen, <vscale x 2 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xv_se_e16m1(<vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i16.i16.iXLen(iXLen, iXLen, <vscale x 4 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xv_se_e16m2(<vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i16.i16.iXLen(iXLen, iXLen, <vscale x 8 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xv_se_e16m4(<vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i16.i16.iXLen(iXLen, iXLen, <vscale x 16 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xv_se_e16m8(<vscale x 32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32i16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32i16.i16.iXLen(iXLen, iXLen, <vscale x 32 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xv_se_e32mf2(<vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i32.i32.iXLen(iXLen, iXLen, <vscale x 1 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xv_se_e32m1(<vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i32.i32.iXLen(iXLen, iXLen, <vscale x 2 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xv_se_e32m2(<vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i32.i32.iXLen(iXLen, iXLen, <vscale x 4 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xv_se_e32m4(<vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i32.i32.iXLen(iXLen, iXLen, <vscale x 8 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xv_se_e32m8(<vscale x 16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i32.i32.iXLen(iXLen, iXLen, <vscale x 16 x i32>, i32, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_xv_se_e8mf8(<vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv1i8.iXLen.i8.iXLen(iXLen 3, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv1i8.iXLen.i8.iXLen(iXLen, <vscale x 1 x i8>, i8, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_xv_se_e8mf4(<vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv2i8.iXLen.i8.iXLen(iXLen 3, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv2i8.iXLen.i8.iXLen(iXLen, <vscale x 2 x i8>, i8, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_xv_se_e8mf2(<vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv4i8.iXLen.i8.iXLen(iXLen 3, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv4i8.iXLen.i8.iXLen(iXLen, <vscale x 4 x i8>, i8, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_xv_se_e8m1(<vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv8i8.iXLen.i8.iXLen(iXLen 3, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv8i8.iXLen.i8.iXLen(iXLen, <vscale x 8 x i8>, i8, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_xv_se_e8m2(<vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv16i8.iXLen.i8.iXLen(iXLen 3, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv16i8.iXLen.i8.iXLen(iXLen, <vscale x 16 x i8>, i8, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_xv_se_e8m4(<vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv32i8.iXLen.i8.iXLen(iXLen 3, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv32i8.iXLen.i8.iXLen(iXLen, <vscale x 32 x i8>, i8, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_xv_se_e8m8(<vscale x 64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv64i8.iXLen.i8.iXLen(iXLen 3, <vscale x 64 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv64i8.iXLen.i8.iXLen(iXLen, <vscale x 64 x i8>, i8, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_xv_se_e16mf4(<vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv1i16.iXLen.i16.iXLen(iXLen 3, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv1i16.iXLen.i16.iXLen(iXLen, <vscale x 1 x i16>, i16, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_xv_se_e16mf2(<vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv2i16.iXLen.i16.iXLen(iXLen 3, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv2i16.iXLen.i16.iXLen(iXLen, <vscale x 2 x i16>, i16, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_xv_se_e16m1(<vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv4i16.iXLen.i16.iXLen(iXLen 3, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv4i16.iXLen.i16.iXLen(iXLen, <vscale x 4 x i16>, i16, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_xv_se_e16m2(<vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv8i16.iXLen.i16.iXLen(iXLen 3, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv8i16.iXLen.i16.iXLen(iXLen, <vscale x 8 x i16>, i16, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_xv_se_e16m4(<vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv16i16.iXLen.i16.iXLen(iXLen 3, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv16i16.iXLen.i16.iXLen(iXLen, <vscale x 16 x i16>, i16, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_xv_se_e16m8(<vscale x 32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv32i16.iXLen.i16.iXLen(iXLen 3, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv32i16.iXLen.i16.iXLen(iXLen, <vscale x 32 x i16>, i16, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_xv_se_e32mf2(<vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv1i32.i32.i32.iXLen(iXLen 3, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv1i32.i32.i32.iXLen(iXLen, <vscale x 1 x i32>, i32, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_xv_se_e32m1(<vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv2i32.i32.i32.iXLen(iXLen 3, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv2i32.i32.i32.iXLen(iXLen, <vscale x 2 x i32>, i32, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_xv_se_e32m2(<vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i32.i32.iXLen(iXLen 3, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i32.i32.iXLen(iXLen, <vscale x 4 x i32>, i32, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_xv_se_e32m4(<vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv8i32.i32.i32.iXLen(iXLen 3, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv8i32.i32.i32.iXLen(iXLen, <vscale x 8 x i32>, i32, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_xv_se_e32m8(<vscale x 16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv16i32.i32.i32.iXLen(iXLen 3, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv16i32.i32.i32.iXLen(iXLen, <vscale x 16 x i32>, i32, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_xv_e8mf8(<vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xv.nxv1i8.iXLen.i8.iXLen(iXLen 3, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xv.nxv1i8.iXLen.i8.iXLen(iXLen, <vscale x 1 x i8>, i8, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_xv_e8mf4(<vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xv.nxv2i8.iXLen.i8.iXLen(iXLen 3, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xv.nxv2i8.iXLen.i8.iXLen(iXLen, <vscale x 2 x i8>, i8, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_xv_e8mf2(<vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xv.nxv4i8.iXLen.i8.iXLen(iXLen 3, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xv.nxv4i8.iXLen.i8.iXLen(iXLen, <vscale x 4 x i8>, i8, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_xv_e8m1(<vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xv.nxv8i8.iXLen.i8.iXLen(iXLen 3, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xv.nxv8i8.iXLen.i8.iXLen(iXLen, <vscale x 8 x i8>, i8, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_xv_e8m2(<vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xv.nxv16i8.iXLen.i8.iXLen(iXLen 3, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xv.nxv16i8.iXLen.i8.iXLen(iXLen, <vscale x 16 x i8>, i8, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_xv_e8m4(<vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xv.nxv32i8.iXLen.i8.iXLen(iXLen 3, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xv.nxv32i8.iXLen.i8.iXLen(iXLen, <vscale x 32 x i8>, i8, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_xv_e8m8(<vscale x 64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xv.nxv64i8.iXLen.i8.iXLen(iXLen 3, <vscale x 64 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xv.nxv64i8.iXLen.i8.iXLen(iXLen, <vscale x 64 x i8>, i8, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_xv_e16mf4(<vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xv.nxv1i16.iXLen.i16.iXLen(iXLen 3, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xv.nxv1i16.iXLen.i16.iXLen(iXLen, <vscale x 1 x i16>, i16, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_xv_e16mf2(<vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xv.nxv2i16.iXLen.i16.iXLen(iXLen 3, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xv.nxv2i16.iXLen.i16.iXLen(iXLen, <vscale x 2 x i16>, i16, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_xv_e16m1(<vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xv.nxv4i16.iXLen.i16.iXLen(iXLen 3, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xv.nxv4i16.iXLen.i16.iXLen(iXLen, <vscale x 4 x i16>, i16, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_xv_e16m2(<vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xv.nxv8i16.iXLen.i16.iXLen(iXLen 3, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xv.nxv8i16.iXLen.i16.iXLen(iXLen, <vscale x 8 x i16>, i16, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_xv_e16m4(<vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xv.nxv16i16.iXLen.i16.iXLen(iXLen 3, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xv.nxv16i16.iXLen.i16.iXLen(iXLen, <vscale x 16 x i16>, i16, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_xv_e16m8(<vscale x 32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xv.nxv32i16.iXLen.i16.iXLen(iXLen 3, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xv.nxv32i16.iXLen.i16.iXLen(iXLen, <vscale x 32 x i16>, i16, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_xv_e32mf2(<vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xv.nxv1i32.i32.i32.iXLen(iXLen 3, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xv.nxv1i32.i32.i32.iXLen(iXLen, <vscale x 1 x i32>, i32, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_xv_e32m1(<vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xv.nxv2i32.i32.i32.iXLen(iXLen 3, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xv.nxv2i32.i32.i32.iXLen(iXLen, <vscale x 2 x i32>, i32, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_xv_e32m2(<vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.nxv4i32.i32.i32.iXLen(iXLen 3, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.nxv4i32.i32.i32.iXLen(iXLen, <vscale x 4 x i32>, i32, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_xv_e32m4(<vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xv.nxv8i32.i32.i32.iXLen(iXLen 3, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xv.nxv8i32.i32.i32.iXLen(iXLen, <vscale x 8 x i32>, i32, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_xv_e32m8(<vscale x 16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xv.nxv16i32.i32.i32.iXLen(iXLen 3, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xv.nxv16i32.i32.i32.iXLen(iXLen, <vscale x 16 x i32>, i32, iXLen)
+
+define void @test_sf_vc_iv_se_e8mf8(<vscale x 1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen, iXLen, <vscale x 1 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e8mf4(<vscale x 2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen, iXLen, <vscale x 2 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e8mf2(<vscale x 4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen, iXLen, <vscale x 4 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e8m1(<vscale x 8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen, iXLen, <vscale x 8 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e8m2(<vscale x 16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen, iXLen, <vscale x 16 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e8m4(<vscale x 32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen, iXLen, <vscale x 32 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e8m8(<vscale x 64 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 64 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen, iXLen, <vscale x 64 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e16mf4(<vscale x 1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen, iXLen, <vscale x 1 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e16mf2(<vscale x 2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen, iXLen, <vscale x 2 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e16m1(<vscale x 4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen, iXLen, <vscale x 4 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e16m2(<vscale x 8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen, iXLen, <vscale x 8 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e16m4(<vscale x 16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen, iXLen, <vscale x 16 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e16m8(<vscale x 32 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 32 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen, iXLen, <vscale x 32 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e32mf2(<vscale x 1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen, iXLen, <vscale x 1 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e32m1(<vscale x 2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen, iXLen, <vscale x 2 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e32m2(<vscale x 4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen, iXLen, <vscale x 4 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e32m4(<vscale x 8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen, iXLen, <vscale x 8 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e32m8(<vscale x 16 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 16 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen, iXLen, <vscale x 16 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e64m1(<vscale x 1 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen, iXLen, <vscale x 1 x i64>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e64m2(<vscale x 2 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 2 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen, iXLen, <vscale x 2 x i64>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e64m4(<vscale x 4 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 4 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen, iXLen, <vscale x 4 x i64>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e64m8(<vscale x 8 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 8 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen, iXLen, <vscale x 8 x i64>, iXLen, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_iv_se_e8mf8(<vscale x 1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i8>, iXLen, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_iv_se_e8mf4(<vscale x 2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i8>, iXLen, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_iv_se_e8mf2(<vscale x 4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i8>, iXLen, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_iv_se_e8m1(<vscale x 8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i8>, iXLen, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_iv_se_e8m2(<vscale x 16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i8>, iXLen, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_iv_se_e8m4(<vscale x 32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x i8>, iXLen, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_iv_se_e8m8(<vscale x 64 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 64 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, <vscale x 64 x i8>, iXLen, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_iv_se_e16mf4(<vscale x 1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i16>, iXLen, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_iv_se_e16mf2(<vscale x 2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i16>, iXLen, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_iv_se_e16m1(<vscale x 4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i16>, iXLen, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_iv_se_e16m2(<vscale x 8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i16>, iXLen, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_iv_se_e16m4(<vscale x 16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i16>, iXLen, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_iv_se_e16m8(<vscale x 32 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x i16>, iXLen, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_iv_se_e32mf2(<vscale x 1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i32>, iXLen, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_iv_se_e32m1(<vscale x 2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i32>, iXLen, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_iv_se_e32m2(<vscale x 4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i32>, iXLen, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_iv_se_e32m4(<vscale x 8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i32>, iXLen, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_iv_se_e32m8(<vscale x 16 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i32>, iXLen, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_iv_se_e64m1(<vscale x 1 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i64>, iXLen, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_iv_se_e64m2(<vscale x 2 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i64>, iXLen, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_iv_se_e64m4(<vscale x 4 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i64>, iXLen, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_iv_se_e64m8(<vscale x 8 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i64>, iXLen, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_iv_e8mf8(<vscale x 1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.iv.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.iv.nxv1i8.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i8>, iXLen, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_iv_e8mf4(<vscale x 2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.iv.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.iv.nxv2i8.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i8>, iXLen, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_iv_e8mf2(<vscale x 4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.iv.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.iv.nxv4i8.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i8>, iXLen, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_iv_e8m1(<vscale x 8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.iv.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.iv.nxv8i8.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i8>, iXLen, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_iv_e8m2(<vscale x 16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.iv.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.iv.nxv16i8.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i8>, iXLen, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_iv_e8m4(<vscale x 32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.iv.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.iv.nxv32i8.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x i8>, iXLen, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_iv_e8m8(<vscale x 64 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.iv.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 64 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.iv.nxv64i8.iXLen.iXLen.iXLen(iXLen, <vscale x 64 x i8>, iXLen, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_iv_e16mf4(<vscale x 1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.iv.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.iv.nxv1i16.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i16>, iXLen, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_iv_e16mf2(<vscale x 2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.iv.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.iv.nxv2i16.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i16>, iXLen, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_iv_e16m1(<vscale x 4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.iv.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.iv.nxv4i16.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i16>, iXLen, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_iv_e16m2(<vscale x 8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.iv.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.iv.nxv8i16.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i16>, iXLen, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_iv_e16m4(<vscale x 16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.iv.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.iv.nxv16i16.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i16>, iXLen, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_iv_e16m8(<vscale x 32 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.iv.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.iv.nxv32i16.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x i16>, iXLen, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_iv_e32mf2(<vscale x 1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.iv.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.iv.nxv1i32.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i32>, iXLen, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_iv_e32m1(<vscale x 2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.iv.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.iv.nxv2i32.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i32>, iXLen, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_iv_e32m2(<vscale x 4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.nxv4i32.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i32>, iXLen, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_iv_e32m4(<vscale x 8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.iv.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.iv.nxv8i32.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i32>, iXLen, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_iv_e32m8(<vscale x 16 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.iv.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.iv.nxv16i32.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i32>, iXLen, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_iv_e64m1(<vscale x 1 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.iv.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.iv.nxv1i64.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i64>, iXLen, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_iv_e64m2(<vscale x 2 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.iv.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.iv.nxv2i64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i64>, iXLen, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_iv_e64m4(<vscale x 4 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.iv.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.iv.nxv4i64.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i64>, iXLen, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_iv_e64m8(<vscale x 8 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.iv.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.iv.nxv8i64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i64>, iXLen, iXLen)
+
+define void @test_sf_vc_fv_se_e16mf4(<vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i16.f16.iXLen(iXLen, iXLen, <vscale x 1 x i16>, half, iXLen)
+
+define void @test_sf_vc_fv_se_e16mf2(<vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i16.f16.iXLen(iXLen, iXLen, <vscale x 2 x i16>, half, iXLen)
+
+define void @test_sf_vc_fv_se_e16m1(<vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i16.f16.iXLen(iXLen, iXLen, <vscale x 4 x i16>, half, iXLen)
+
+define void @test_sf_vc_fv_se_e16m2(<vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i16.f16.iXLen(iXLen, iXLen, <vscale x 8 x i16>, half, iXLen)
+
+define void @test_sf_vc_fv_se_e16m4(<vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16i16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16i16.f16.iXLen(iXLen, iXLen, <vscale x 16 x i16>, half, iXLen)
+
+define void @test_sf_vc_fv_se_e16m8(<vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32i16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32i16.f16.iXLen(iXLen, iXLen, <vscale x 32 x i16>, half, iXLen)
+
+define void @test_sf_vc_fv_se_e32mf2(<vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i32.f32.iXLen(iXLen, iXLen, <vscale x 1 x i32>, float, iXLen)
+
+define void @test_sf_vc_fv_se_e32m1(<vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i32.f32.iXLen(iXLen, iXLen, <vscale x 2 x i32>, float, iXLen)
+
+define void @test_sf_vc_fv_se_e32m2(<vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i32.f32.iXLen(iXLen, iXLen, <vscale x 4 x i32>, float, iXLen)
+
+define void @test_sf_vc_fv_se_e32m4(<vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i32.f32.iXLen(iXLen, iXLen, <vscale x 8 x i32>, float, iXLen)
+
+define void @test_sf_vc_fv_se_e32m8(<vscale x 16 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16i32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16i32.f32.iXLen(iXLen, iXLen, <vscale x 16 x i32>, float, iXLen)
+
+define void @test_sf_vc_fv_se_e64m1(<vscale x 1 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i64.f64.iXLen(iXLen 1, iXLen 31, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i64.f64.iXLen(iXLen, iXLen, <vscale x 1 x i64>, double, iXLen)
+
+define void @test_sf_vc_fv_se_e64m2(<vscale x 2 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i64.f64.iXLen(iXLen 1, iXLen 31, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i64.f64.iXLen(iXLen, iXLen, <vscale x 2 x i64>, double, iXLen)
+
+define void @test_sf_vc_fv_se_e64m4(<vscale x 4 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i64.f64.iXLen(iXLen 1, iXLen 31, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i64.f64.iXLen(iXLen, iXLen, <vscale x 4 x i64>, double, iXLen)
+
+define void @test_sf_vc_fv_se_e64m8(<vscale x 8 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i64.f64.iXLen(iXLen 1, iXLen 31, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i64.f64.iXLen(iXLen, iXLen, <vscale x 8 x i64>, double, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_fv_se_e16mf4(<vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv1i16.iXLen.f16.iXLen(iXLen 1, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv1i16.iXLen.f16.iXLen(iXLen, <vscale x 1 x i16>, half, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_fv_se_e16mf2(<vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv2i16.iXLen.f16.iXLen(iXLen 1, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv2i16.iXLen.f16.iXLen(iXLen, <vscale x 2 x i16>, half, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_fv_se_e16m1(<vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv4i16.iXLen.f16.iXLen(iXLen 1, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv4i16.iXLen.f16.iXLen(iXLen, <vscale x 4 x i16>, half, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_fv_se_e16m2(<vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv8i16.iXLen.f16.iXLen(iXLen 1, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv8i16.iXLen.f16.iXLen(iXLen, <vscale x 8 x i16>, half, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_fv_se_e16m4(<vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv16i16.iXLen.f16.iXLen(iXLen 1, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv16i16.iXLen.f16.iXLen(iXLen, <vscale x 16 x i16>, half, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_fv_se_e16m8(<vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv32i16.iXLen.f16.iXLen(iXLen 1, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv32i16.iXLen.f16.iXLen(iXLen, <vscale x 32 x i16>, half, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_fv_se_e32mf2(<vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv1i32.iXLen.f32.iXLen(iXLen 1, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv1i32.iXLen.f32.iXLen(iXLen, <vscale x 1 x i32>, float, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_fv_se_e32m1(<vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv2i32.iXLen.f32.iXLen(iXLen 1, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv2i32.iXLen.f32.iXLen(iXLen, <vscale x 2 x i32>, float, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_fv_se_e32m2(<vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv4i32.iXLen.f32.iXLen(iXLen 1, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv4i32.iXLen.f32.iXLen(iXLen, <vscale x 4 x i32>, float, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_fv_se_e32m4(<vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv8i32.iXLen.f32.iXLen(iXLen 1, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv8i32.iXLen.f32.iXLen(iXLen, <vscale x 8 x i32>, float, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_fv_se_e32m8(<vscale x 16 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv16i32.iXLen.f32.iXLen(iXLen 1, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv16i32.iXLen.f32.iXLen(iXLen, <vscale x 16 x i32>, float, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_fv_se_e64m1(<vscale x 1 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv1i64.iXLen.f64.iXLen(iXLen 1, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv1i64.iXLen.f64.iXLen(iXLen, <vscale x 1 x i64>, double, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_fv_se_e64m2(<vscale x 2 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv2i64.iXLen.f64.iXLen(iXLen 1, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv2i64.iXLen.f64.iXLen(iXLen, <vscale x 2 x i64>, double, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_fv_se_e64m4(<vscale x 4 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv4i64.iXLen.f64.iXLen(iXLen 1, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv4i64.iXLen.f64.iXLen(iXLen, <vscale x 4 x i64>, double, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_fv_se_e64m8(<vscale x 8 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv8i64.iXLen.f64.iXLen(iXLen 1, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv8i64.iXLen.f64.iXLen(iXLen, <vscale x 8 x i64>, double, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_fv_e16mf4(<vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fv.nxv1i16.iXLen.f16.iXLen(iXLen 1, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fv.nxv1i16.iXLen.f16.iXLen(iXLen, <vscale x 1 x i16>, half, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_fv_e16mf2(<vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fv.nxv2i16.iXLen.f16.iXLen(iXLen 1, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fv.nxv2i16.iXLen.f16.iXLen(iXLen, <vscale x 2 x i16>, half, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_fv_e16m1(<vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fv.nxv4i16.iXLen.f16.iXLen(iXLen 1, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fv.nxv4i16.iXLen.f16.iXLen(iXLen, <vscale x 4 x i16>, half, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_fv_e16m2(<vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fv.nxv8i16.iXLen.f16.iXLen(iXLen 1, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fv.nxv8i16.iXLen.f16.iXLen(iXLen, <vscale x 8 x i16>, half, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_fv_e16m4(<vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fv.nxv16i16.iXLen.f16.iXLen(iXLen 1, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fv.nxv16i16.iXLen.f16.iXLen(iXLen, <vscale x 16 x i16>, half, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_fv_e16m8(<vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fv.nxv32i16.iXLen.f16.iXLen(iXLen 1, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fv.nxv32i16.iXLen.f16.iXLen(iXLen, <vscale x 32 x i16>, half, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_fv_e32mf2(<vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fv.nxv1i32.iXLen.f32.iXLen(iXLen 1, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fv.nxv1i32.iXLen.f32.iXLen(iXLen, <vscale x 1 x i32>, float, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_fv_e32m1(<vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fv.nxv2i32.iXLen.f32.iXLen(iXLen 1, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fv.nxv2i32.iXLen.f32.iXLen(iXLen, <vscale x 2 x i32>, float, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_fv_e32m2(<vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.nxv4i32.iXLen.f32.iXLen(iXLen 1, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.nxv4i32.iXLen.f32.iXLen(iXLen, <vscale x 4 x i32>, float, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_fv_e32m4(<vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fv.nxv8i32.iXLen.f32.iXLen(iXLen 1, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fv.nxv8i32.iXLen.f32.iXLen(iXLen, <vscale x 8 x i32>, float, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_fv_e32m8(<vscale x 16 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fv.nxv16i32.iXLen.f32.iXLen(iXLen 1, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fv.nxv16i32.iXLen.f32.iXLen(iXLen, <vscale x 16 x i32>, float, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_fv_e64m1(<vscale x 1 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fv.nxv1i64.iXLen.f64.iXLen(iXLen 1, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fv.nxv1i64.iXLen.f64.iXLen(iXLen, <vscale x 1 x i64>, double, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_fv_e64m2(<vscale x 2 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fv.nxv2i64.iXLen.f64.iXLen(iXLen 1, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fv.nxv2i64.iXLen.f64.iXLen(iXLen, <vscale x 2 x i64>, double, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_fv_e64m4(<vscale x 4 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fv.nxv4i64.iXLen.f64.iXLen(iXLen 1, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fv.nxv4i64.iXLen.f64.iXLen(iXLen, <vscale x 4 x i64>, double, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_fv_e64m8(<vscale x 8 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.nxv8i64.iXLen.f64.iXLen(iXLen 1, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.nxv8i64.iXLen.f64.iXLen(iXLen, <vscale x 8 x i64>, double, iXLen)
diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll
new file mode 100644
index 0000000000000..d37d121cdd197
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll
@@ -0,0 +1,3020 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+xsfvcp \
+; RUN: -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+xsfvcp \
+; RUN: -verify-machineinstrs | FileCheck %s
+
+define void @test_sf_vc_vvv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define void @test_sf_vc_vvv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define void @test_sf_vc_vvv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define void @test_sf_vc_vvv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define void @test_sf_vc_vvv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define void @test_sf_vc_vvv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define void @test_sf_vc_vvv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8r.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v16, v24
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
+
+define void @test_sf_vc_vvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define void @test_sf_vc_vvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define void @test_sf_vc_vvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define void @test_sf_vc_vvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define void @test_sf_vc_vvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define void @test_sf_vc_vvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v16, v24
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
+
+define void @test_sf_vc_vvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
+
+define void @test_sf_vc_vvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
+
+define void @test_sf_vc_vvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
+
+define void @test_sf_vc_vvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
+
+define void @test_sf_vc_vvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re32.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v16, v24
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
+
+define void @test_sf_vc_vvv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
+
+define void @test_sf_vc_vvv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
+
+define void @test_sf_vc_vvv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
+
+define void @test_sf_vc_vvv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re64.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v16, v24
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_vvv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
+ ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_vvv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
+ ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_vvv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
+ ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_vvv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
+ ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_vvv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
+ ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_vvv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
+ ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_vvv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8r.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl)
+ ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_vvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_vvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_vvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_vvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_vvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_vvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_vvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_vvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_vvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_vvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_vvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re32.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_vvv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
+ ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_vvv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
+ ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_vvv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
+ ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_vvv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re64.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
+ ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_vvv_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
+ ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_vvv_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
+ ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_vvv_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
+ ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_vvv_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
+ ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_vvv_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
+ ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_vvv_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
+ ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_vvv_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8r.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl)
+ ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_vvv_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_vvv_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_vvv_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_vvv_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_vvv_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_vvv_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_vvv_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_vvv_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_vvv_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_vvv_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_vvv_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re32.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_vvv_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
+ ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_vvv_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
+ ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_vvv_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
+ ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_vvv_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re64.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
+ ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
+
+define void @test_sf_vc_xvv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i8.i8.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i8.i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i8.i8.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i8.i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i8.i8.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i8.i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i8.i8.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i8.i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i8.i8.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i8.i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i8.i8.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i8.i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v16, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv64i8.i8.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv64i8.i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i16.i16.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i16.i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i16.i16.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i16.i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i16.i16.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i16.i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i16.i16.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i16.i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i16.i16.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i16.i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v16, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i16.i16.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i16.i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i32.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i32.iXLen.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i32.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i32.iXLen.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i32.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i32.iXLen.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i32.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i32.iXLen.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v16, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i32.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i32.iXLen.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, i32, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_xvv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.iXLen.i8.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.iXLen.i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, i8, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_xvv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.iXLen.i8.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.iXLen.i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, i8, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_xvv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.iXLen.i8.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.iXLen.i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, i8, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_xvv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.iXLen.i8.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.iXLen.i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, i8, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_xvv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.iXLen.i8.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.iXLen.i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, i8, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_xvv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.iXLen.i8.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.iXLen.i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, i8, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_xvv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.iXLen.i8.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.iXLen.i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, i8, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_xvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.iXLen.i16.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.iXLen.i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, i16, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_xvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.iXLen.i16.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.iXLen.i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, i16, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_xvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.iXLen.i16.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.iXLen.i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, i16, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_xvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.iXLen.i16.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.iXLen.i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, i16, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_xvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.iXLen.i16.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.iXLen.i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, i16, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_xvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.iXLen.i16.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.iXLen.i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, i16, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_xvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.iXLen.i32.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.iXLen.i32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, i32, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_xvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.iXLen.i32.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.iXLen.i32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, i32, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_xvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.iXLen.i32.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.iXLen.i32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, i32, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_xvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.iXLen.i32.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.iXLen.i32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, i32, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_xvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.iXLen.i32.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.iXLen.i32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, i32, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_xvv_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.nxv1i8.iXLen.i8.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.nxv1i8.iXLen.i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, i8, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_xvv_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.nxv2i8.iXLen.i8.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.nxv2i8.iXLen.i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, i8, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_xvv_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.nxv4i8.iXLen.i8.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.nxv4i8.iXLen.i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, i8, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_xvv_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.nxv8i8.iXLen.i8.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.nxv8i8.iXLen.i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, i8, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_xvv_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.nxv16i8.iXLen.i8.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.nxv16i8.iXLen.i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, i8, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_xvv_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.nxv32i8.iXLen.i8.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.nxv32i8.iXLen.i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, i8, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_xvv_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.nxv64i8.iXLen.i8.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.nxv64i8.iXLen.i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, i8, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_xvv_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.nxv1i16.iXLen.i16.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.nxv1i16.iXLen.i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, i16, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_xvv_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.nxv2i16.iXLen.i16.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.nxv2i16.iXLen.i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, i16, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_xvv_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.nxv4i16.iXLen.i16.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.nxv4i16.iXLen.i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, i16, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_xvv_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.nxv8i16.iXLen.i16.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.nxv8i16.iXLen.i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, i16, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_xvv_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.nxv16i16.iXLen.i16.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.nxv16i16.iXLen.i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, i16, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_xvv_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.nxv32i16.iXLen.i16.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.nxv32i16.iXLen.i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, i16, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_xvv_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.nxv1i32.iXLen.i32.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.nxv1i32.iXLen.i32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, i32, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_xvv_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.nxv2i32.iXLen.i32.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.nxv2i32.iXLen.i32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, i32, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_xvv_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.nxv4i32.iXLen.i32.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.nxv4i32.iXLen.i32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, i32, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_xvv_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.nxv8i32.iXLen.i32.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.nxv8i32.iXLen.i32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, i32, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_xvv_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.nxv16i32.iXLen.i32.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.nxv16i32.iXLen.i32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, i32, iXLen)
+
+define void @test_sf_vc_ivv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_ivv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_ivv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_ivv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_ivv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_ivv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_ivv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_ivv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_ivv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_ivv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_ivv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_ivv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_ivv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_ivv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_ivv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_ivv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_ivv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_ivv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_ivv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_ivv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_ivv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_ivv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_ivv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen, iXLen)
+
+define <vscale x 1 x i8> @test_sf_vc_v_ivv_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i8> %0
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.nxv1i8.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen, iXLen)
+
+define <vscale x 2 x i8> @test_sf_vc_v_ivv_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i8> %0
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.nxv2i8.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen, iXLen)
+
+define <vscale x 4 x i8> @test_sf_vc_v_ivv_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i8> %0
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.nxv4i8.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen, iXLen)
+
+define <vscale x 8 x i8> @test_sf_vc_v_ivv_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i8> %0
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.nxv8i8.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen, iXLen)
+
+define <vscale x 16 x i8> @test_sf_vc_v_ivv_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 16 x i8> %0
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.nxv16i8.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen, iXLen)
+
+define <vscale x 32 x i8> @test_sf_vc_v_ivv_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 32 x i8> %0
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.nxv32i8.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vc_v_ivv_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.nxv64i8.iXLen.iXLen.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_ivv_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.nxv1i16.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_ivv_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.nxv2i16.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_ivv_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.nxv4i16.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_ivv_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.nxv8i16.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_ivv_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.nxv16i16.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_ivv_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.nxv32i16.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_ivv_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.nxv1i32.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_ivv_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.nxv2i32.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_ivv_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.nxv4i32.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_ivv_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.nxv8i32.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_ivv_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.nxv16i32.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_ivv_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.nxv1i64.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_ivv_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.nxv2i64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_ivv_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.nxv4i64.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_ivv_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.nxv8i64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen, iXLen)
+
+define void @test_sf_vc_fvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i16.f16.iXLen(iXLen 1, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i16.f16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i16.f16.iXLen(iXLen 1, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i16.f16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i16.f16.iXLen(iXLen 1, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i16.f16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i16.f16.iXLen(iXLen 1, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i16.f16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i16.f16.iXLen(iXLen 1, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i16.f16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v16, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32i16.f16.iXLen(iXLen 1, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32i16.f16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i32.f32.iXLen(iXLen 1, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i32.f32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, float, iXLen)
+
+define void @test_sf_vc_fvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i32.f32.iXLen(iXLen 1, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i32.f32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, float, iXLen)
+
+define void @test_sf_vc_fvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i32.f32.iXLen(iXLen 1, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i32.f32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, float, iXLen)
+
+define void @test_sf_vc_fvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i32.f32.iXLen(iXLen 1, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i32.f32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, float, iXLen)
+
+define void @test_sf_vc_fvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v16, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i32.f32.iXLen(iXLen 1, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i32.f32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, float, iXLen)
+
+define void @test_sf_vc_fvv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i64.f64.iXLen(iXLen 1, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i64.f64.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, double, iXLen)
+
+define void @test_sf_vc_fvv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i64.f64.iXLen(iXLen 1, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i64.f64.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, double, iXLen)
+
+define void @test_sf_vc_fvv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i64.f64.iXLen(iXLen 1, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i64.f64.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, double, iXLen)
+
+define void @test_sf_vc_fvv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v16, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i64.f64.iXLen(iXLen 1, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i64.f64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, double, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_fvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.iXLen.f16.iXLen(iXLen 1, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.iXLen.f16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, half, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_fvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.iXLen.f16.iXLen(iXLen 1, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.iXLen.f16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, half, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_fvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.iXLen.f16.iXLen(iXLen 1, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.iXLen.f16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, half, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_fvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.iXLen.f16.iXLen(iXLen 1, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.iXLen.f16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, half, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_fvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.iXLen.f16.iXLen(iXLen 1, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.iXLen.f16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, half, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_fvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.iXLen.f16.iXLen(iXLen 1, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.iXLen.f16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, half, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_fvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.iXLen.f32.iXLen(iXLen 1, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.iXLen.f32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, float, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_fvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.iXLen.f32.iXLen(iXLen 1, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.iXLen.f32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, float, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_fvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.iXLen.f32.iXLen(iXLen 1, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.iXLen.f32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, float, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_fvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.iXLen.f32.iXLen(iXLen 1, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.iXLen.f32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, float, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_fvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.iXLen.f32.iXLen(iXLen 1, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.iXLen.f32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, float, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_fvv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.iXLen.f64.iXLen(iXLen 1, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.iXLen.f64.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, double, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_fvv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.iXLen.f64.iXLen(iXLen 1, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.iXLen.f64.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, double, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_fvv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.iXLen.f64.iXLen(iXLen 1, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.iXLen.f64.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, double, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_fvv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.iXLen.f64.iXLen(iXLen 1, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.iXLen.f64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, double, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_fvv_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.nxv1i16.iXLen.f16.iXLen(iXLen 1, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.nxv1i16.iXLen.f16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, half, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_fvv_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.nxv2i16.iXLen.f16.iXLen(iXLen 1, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.nxv2i16.iXLen.f16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, half, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_fvv_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.nxv4i16.iXLen.f16.iXLen(iXLen 1, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.nxv4i16.iXLen.f16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, half, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_fvv_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.nxv8i16.iXLen.f16.iXLen(iXLen 1, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.nxv8i16.iXLen.f16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, half, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_fvv_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.nxv16i16.iXLen.f16.iXLen(iXLen 1, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.nxv16i16.iXLen.f16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, half, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_fvv_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.nxv32i16.iXLen.f16.iXLen(iXLen 1, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.nxv32i16.iXLen.f16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, half, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_fvv_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.nxv1i32.iXLen.f32.iXLen(iXLen 1, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.nxv1i32.iXLen.f32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, float, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_fvv_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.nxv2i32.iXLen.f32.iXLen(iXLen 1, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.nxv2i32.iXLen.f32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, float, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_fvv_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.nxv4i32.iXLen.f32.iXLen(iXLen 1, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.nxv4i32.iXLen.f32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, float, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_fvv_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.nxv8i32.iXLen.f32.iXLen(iXLen 1, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.nxv8i32.iXLen.f32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, float, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_fvv_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.nxv16i32.iXLen.f32.iXLen(iXLen 1, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.nxv16i32.iXLen.f32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, float, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_fvv_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.nxv1i64.iXLen.f64.iXLen(iXLen 1, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.nxv1i64.iXLen.f64.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, double, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_fvv_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.nxv2i64.iXLen.f64.iXLen(iXLen 1, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.nxv2i64.iXLen.f64.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, double, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_fvv_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.nxv4i64.iXLen.f64.iXLen(iXLen 1, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.nxv4i64.iXLen.f64.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, double, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_fvv_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.nxv8i64.iXLen.f64.iXLen(iXLen 1, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.nxv8i64.iXLen.f64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, double, iXLen)
diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll
new file mode 100644
index 0000000000000..2d6ac8d55fc15
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll
@@ -0,0 +1,2111 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+xsfvcp \
+; RUN: -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+xsfvcp \
+; RUN: -verify-machineinstrs | FileCheck %s
+
+define void @test_sf_vc_vvw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i16.nxv1i8.nxv1i8.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i16.nxv1i8.nxv1i8.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define void @test_sf_vc_vvw_se_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i16.nxv2i8.nxv2i8.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i16.nxv2i8.nxv2i8.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define void @test_sf_vc_vvw_se_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i16.nxv4i8.nxv4i8.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i16.nxv4i8.nxv4i8.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define void @test_sf_vc_vvw_se_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v10, v11
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i16.nxv8i8.nxv8i8.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i16.nxv8i8.nxv8i8.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define void @test_sf_vc_vvw_se_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v12, v14
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i16.nxv16i8.nxv16i8.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i16.nxv16i8.nxv16i8.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define void @test_sf_vc_vvw_se_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v16, v20
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv32i16.nxv32i8.nxv32i8.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv32i16.nxv32i8.nxv32i8.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define void @test_sf_vc_vvw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i32.nxv1i16.nxv1i16.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i32.nxv1i16.nxv1i16.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define void @test_sf_vc_vvw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i32.nxv2i16.nxv2i16.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i32.nxv2i16.nxv2i16.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define void @test_sf_vc_vvw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v10, v11
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i32.nxv4i16.nxv4i16.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i32.nxv4i16.nxv4i16.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define void @test_sf_vc_vvw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v12, v14
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i32.nxv8i16.nxv8i16.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i32.nxv8i16.nxv8i16.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define void @test_sf_vc_vvw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v16, v20
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i32.nxv16i16.nxv16i16.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i32.nxv16i16.nxv16i16.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define void @test_sf_vc_vvw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i64.nxv1i32.nxv1i32.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i64.nxv1i32.nxv1i32.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
+
+define void @test_sf_vc_vvw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v10, v11
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i64.nxv2i32.nxv2i32.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i64.nxv2i32.nxv2i32.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
+
+define void @test_sf_vc_vvw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v12, v14
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i64.nxv4i32.nxv4i32.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i64.nxv4i32.nxv4i32.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
+
+define void @test_sf_vc_vvw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v16, v20
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i64.nxv8i32.nxv8i32.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i64.nxv8i32.nxv8i32.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_vvw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_vvw_se_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_vvw_se_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_vvw_se_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_vvw_se_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_vvw_se_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v16, v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_vvw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_vvw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_vvw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_vvw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_vvw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v16, v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_vvw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
+ ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_vvw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
+ ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_vvw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
+ ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_vvw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v16, v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
+ ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_vvw_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_vvw_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_vvw_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_vvw_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_vvw_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_vvw_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v16, v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_vvw_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_vvw_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_vvw_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_vvw_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_vvw_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v16, v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_vvw_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
+ ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_vvw_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
+ ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_vvw_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
+ ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_vvw_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v16, v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
+ ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
+
+define void @test_sf_vc_xvw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i16.nxv1i8.i8.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i16.nxv1i8.i8.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvw_se_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i16.nxv2i8.i8.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i16.nxv2i8.i8.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvw_se_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i16.nxv4i8.i8.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i16.nxv4i8.i8.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvw_se_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i16.nxv8i8.i8.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i16.nxv8i8.i8.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvw_se_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i16.nxv16i8.i8.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i16.nxv16i8.i8.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvw_se_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v16, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv32i16.nxv32i8.i8.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv32i16.nxv32i8.i8.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i32.nxv1i16.i16.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i32.nxv1i16.i16.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i32.nxv2i16.i16.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i32.nxv2i16.i16.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i32.nxv4i16.i16.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i32.nxv4i16.i16.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i32.nxv8i16.i16.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i32.nxv8i16.i16.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v16, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i32.nxv16i16.i16.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i32.nxv16i16.i16.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i64.nxv1i32.i32.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i64.nxv1i32.i32.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xvw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i64.nxv2i32.i32.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i64.nxv2i32.i32.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xvw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i64.nxv4i32.i32.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i64.nxv4i32.i32.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xvw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v16, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i64.nxv8i32.i32.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i64.nxv8i32.i32.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, i32, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_xvw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, i8, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_xvw_se_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, i8, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_xvw_se_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, i8, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_xvw_se_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, i8, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_xvw_se_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, i8, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_xvw_se_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v16, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, i8, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_xvw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, i16, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_xvw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, i16, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_xvw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, i16, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_xvw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, i16, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_xvw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v16, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, i16, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_xvw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.i32.nxv1i32.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.i32.nxv1i32.iXLen.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, i32, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_xvw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.i32.nxv2i32.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.i32.nxv2i32.iXLen.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, i32, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_xvw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.i32.nxv4i32.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.i32.nxv4i32.iXLen.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, i32, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_xvw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v16, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.i32.nxv8i32.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.i32.nxv8i32.iXLen.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, i32, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_xvw_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, i8, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_xvw_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, i8, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_xvw_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, i8, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_xvw_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, i8, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_xvw_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, i8, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_xvw_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v16, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, i8, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_xvw_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, i16, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_xvw_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, i16, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_xvw_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, i16, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_xvw_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, i16, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_xvw_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v16, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, i16, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_xvw_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.nxv1i64.iXLen.nxv1i32.i32.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.nxv1i64.iXLen.nxv1i32.i32.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, i32, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_xvw_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.nxv2i64.iXLen.nxv2i32.i32.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.nxv2i64.iXLen.nxv2i32.i32.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, i32, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_xvw_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.nxv4i64.iXLen.nxv4i32.i32.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.nxv4i64.iXLen.nxv4i32.i32.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, i32, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_xvw_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v16, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.nxv8i64.iXLen.nxv8i32.i32.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.nxv8i64.iXLen.nxv8i32.i32.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, i32, iXLen)
+
+define void @test_sf_vc_ivw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i16.nxv1i8.iXLen.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i16.nxv1i8.iXLen.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i16.nxv2i8.iXLen.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i16.nxv2i8.iXLen.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i16.nxv4i8.iXLen.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i16.nxv4i8.iXLen.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i16.nxv8i8.iXLen.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i16.nxv8i8.iXLen.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i16.nxv16i8.iXLen.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i16.nxv16i8.iXLen.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v16, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv32i16.nxv32i8.iXLen.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv32i16.nxv32i8.iXLen.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i32.nxv1i16.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i32.nxv1i16.iXLen.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i32.nxv2i16.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i32.nxv2i16.iXLen.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i32.nxv4i16.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i32.nxv4i16.iXLen.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i32.nxv8i16.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i32.nxv8i16.iXLen.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v16, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i32.nxv16i16.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i32.nxv16i16.iXLen.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i64.nxv1i32.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i64.nxv1i32.iXLen.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i64.nxv2i32.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i64.nxv2i32.iXLen.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i64.nxv4i32.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i64.nxv4i32.iXLen.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v16, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i64.nxv8i32.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i64.nxv8i32.iXLen.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, iXLen, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_ivw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, iXLen, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_ivw_se_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, iXLen, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_ivw_se_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, iXLen, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_ivw_se_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, iXLen, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_ivw_se_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, iXLen, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_ivw_se_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, iXLen, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_ivw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, iXLen, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_ivw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, iXLen, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_ivw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, iXLen, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_ivw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, iXLen, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_ivw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, iXLen, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_ivw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, iXLen, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_ivw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, iXLen, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_ivw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, iXLen, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_ivw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, iXLen, iXLen)
+
+define <vscale x 1 x i16> @test_sf_vc_v_ivw_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i16> %0
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, iXLen, iXLen)
+
+define <vscale x 2 x i16> @test_sf_vc_v_ivw_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i16> %0
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, iXLen, iXLen)
+
+define <vscale x 4 x i16> @test_sf_vc_v_ivw_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i16> %0
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, iXLen, iXLen)
+
+define <vscale x 8 x i16> @test_sf_vc_v_ivw_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i16> %0
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, iXLen, iXLen)
+
+define <vscale x 16 x i16> @test_sf_vc_v_ivw_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 16 x i16> %0
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, iXLen, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vc_v_ivw_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, iXLen, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_ivw_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, iXLen, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_ivw_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, iXLen, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_ivw_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, iXLen, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_ivw_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, iXLen, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_ivw_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, iXLen, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_ivw_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, iXLen, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_ivw_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, iXLen, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_ivw_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, iXLen, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_ivw_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_fvw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1i32.nxv1i16.f16.iXLen(iXLen 1, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1i32.nxv1i16.f16.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2i32.nxv2i16.f16.iXLen(iXLen 1, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2i32.nxv2i16.f16.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.fvw 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4i32.nxv4i16.f16.iXLen(iXLen 1, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4i32.nxv4i16.f16.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.fvw 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8i32.nxv8i16.f16.iXLen(iXLen 1, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8i32.nxv8i16.f16.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.fvw 1, v8, v16, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16i32.nxv16i16.f16.iXLen(iXLen 1, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16i32.nxv16i16.f16.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1i64.nxv1i32.f32.iXLen(iXLen 1, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1i64.nxv1i32.f32.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, float, iXLen)
+
+define void @test_sf_vc_fvw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.fvw 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2i64.nxv2i32.f32.iXLen(iXLen 1, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2i64.nxv2i32.f32.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, float, iXLen)
+
+define void @test_sf_vc_fvw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.fvw 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4i64.nxv4i32.f32.iXLen(iXLen 1, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4i64.nxv4i32.f32.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, float, iXLen)
+
+define void @test_sf_vc_fvw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.fvw 1, v8, v16, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8i64.nxv8i32.f32.iXLen(iXLen 1, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8i64.nxv8i32.f32.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, float, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_fvw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.iXLen.nxv1i16.f16.iXLen(iXLen 1, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.iXLen.nxv1i16.f16.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, half, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_fvw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.iXLen.nxv2i16.f16.iXLen(iXLen 1, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.iXLen.nxv2i16.f16.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, half, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_fvw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.iXLen.nxv4i16.f16.iXLen(iXLen 1, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.iXLen.nxv4i16.f16.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, half, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_fvw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.iXLen.nxv8i16.f16.iXLen(iXLen 1, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.iXLen.nxv8i16.f16.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, half, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_fvw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v16, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.iXLen.nxv16i16.f16.iXLen(iXLen 1, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.iXLen.nxv16i16.f16.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, half, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_fvw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.iXLen.nxv1i32.f32.iXLen(iXLen 1, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.iXLen.nxv1i32.f32.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, float, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_fvw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv2i64.iXLen.nxv2i32.f32.iXLen(iXLen 1, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv2i64.iXLen.nxv2i32.f32.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, float, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_fvw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.iXLen.nxv4i32.f32.iXLen(iXLen 1, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.iXLen.nxv4i32.f32.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, float, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_fvw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v16, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen 1, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, float, iXLen)
+
+define <vscale x 1 x i32> @test_sf_vc_v_fvw_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.nxv1i32.iXLen.nxv1i16.f16.iXLen(iXLen 1, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 1 x i32> %0
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.nxv1i32.iXLen.nxv1i16.f16.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, half, iXLen)
+
+define <vscale x 2 x i32> @test_sf_vc_v_fvw_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.nxv2i32.iXLen.nxv2i16.f16.iXLen(iXLen 1, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 2 x i32> %0
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.nxv2i32.iXLen.nxv2i16.f16.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, half, iXLen)
+
+define <vscale x 4 x i32> @test_sf_vc_v_fvw_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.nxv4i32.iXLen.nxv4i16.f16.iXLen(iXLen 1, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 4 x i32> %0
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.nxv4i32.iXLen.nxv4i16.f16.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, half, iXLen)
+
+define <vscale x 8 x i32> @test_sf_vc_v_fvw_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.nxv8i32.iXLen.nxv8i16.f16.iXLen(iXLen 1, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 8 x i32> %0
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.nxv8i32.iXLen.nxv8i16.f16.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, half, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vc_v_fvw_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v16, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.nxv16i32.iXLen.nxv16i16.f16.iXLen(iXLen 1, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.nxv16i32.iXLen.nxv16i16.f16.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, half, iXLen)
+
+define <vscale x 1 x i64> @test_sf_vc_v_fvw_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.nxv1i64.iXLen.nxv1i32.f32.iXLen(iXLen 1, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 1 x i64> %0
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.nxv1i64.iXLen.nxv1i32.f32.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, float, iXLen)
+
+define <vscale x 2 x i64> @test_sf_vc_v_fvw_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.nxv2i64.iXLen.nxv2i32.f32.iXLen(iXLen 1, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 2 x i64> %0
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.nxv2i64.iXLen.nxv2i32.f32.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, float, iXLen)
+
+define <vscale x 4 x i64> @test_sf_vc_v_fvw_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.nxv4i64.iXLen.nxv4i32.f32.iXLen(iXLen 1, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 4 x i64> %0
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.nxv4i64.iXLen.nxv4i32.f32.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, float, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vc_v_fvw_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v16, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen 1, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, float, iXLen)
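For reference, the overloaded suffixes on the declares above follow a regular pattern: the result vector type, then the mangled types of the overloaded operands in order (the leading iXLen lines up with the first immediate argument at each call site, the trailing iXLen with %vl). iXLen itself appears to be the usual placeholder these tests substitute with i32 or i64 depending on the target when the test is run. Below is a minimal, illustrative sketch (not part of this patch; the helper name is made up) that reconstructs the value-returning fvw names from that pattern:

def vcix_mangled_name(base, dst_vec, vs2_vec, scalar, xlen="iXLen"):
    # Suffix order for the value-returning fvw forms seen above:
    #   <result vector>.<imm xlen>.<vs2 vector>.<scalar>.<vl xlen>
    return ".".join([base, dst_vec, xlen, vs2_vec, scalar, xlen])

# Reproduces the e16mf4 declare in this file:
#   llvm.riscv.sf.vc.v.fvw.se.nxv1i32.iXLen.nxv1i16.f16.iXLen
print(vcix_mangled_name("llvm.riscv.sf.vc.v.fvw.se", "nxv1i32", "nxv1i16", "f16"))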