[llvm] [RISCV] Support VLS for VCIX (PR #67289)
Brandon Wu via llvm-commits
llvm-commits at lists.llvm.org
Mon Sep 25 20:20:05 PDT 2023
https://github.com/4vtomat updated https://github.com/llvm/llvm-project/pull/67289
From 8572286d42aef270821d7df6d27fc22485f98522 Mon Sep 17 00:00:00 2001
From: Brandon Wu <brandon.wu at sifive.com>
Date: Sun, 24 Sep 2023 01:58:05 -0700
Subject: [PATCH 1/2] [RISCV] Support VLS for VCIX
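
The sf.vc.* (VCIX) intrinsics were previously only lowered for scalable
vector types. Handle fixed-length (VLS) vector types as well: each
fixed-length vector operand is converted to its scalable container type
with convertToScalableVector, the intrinsic is re-emitted on the
container types, and a fixed-length result (if any) is converted back
with convertFromScalableVector, preserving the chain for the .se forms.

For example, one of the new tests (instantiated for riscv64, where
iXLen is i64) returns a <2 x i32>, which is lowered through its
nxv1i32 container and selected as sf.vc.v.x under e32/mf2:

  define <2 x i32> @test_sf_vc_v_x_se_e32m1(i32 signext %rs1, i64 %vl) {
  entry:
    %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.x.se.nxv2i32.i64.i32.i64(i64 3, i64 31, i32 %rs1, i64 %vl)
    ret <2 x i32> %0
  }

  declare <2 x i32> @llvm.riscv.sf.vc.v.x.se.nxv2i32.i64.i32.i64(i64, i64, i32, i64)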
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 153 +
.../RISCV/rvv/fixed-vectors-xsfvcp-x.ll | 1565 +++++++++
.../RISCV/rvv/fixed-vectors-xsfvcp-xv.ll | 3008 +++++++++++++++++
.../RISCV/rvv/fixed-vectors-xsfvcp-xvv.ll | 3008 +++++++++++++++++
.../RISCV/rvv/fixed-vectors-xsfvcp-xvw.ll | 2111 ++++++++++++
5 files changed, 9845 insertions(+)
create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-x.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xv.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvv.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvw.ll
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 8e6644821031c17..5fdfb7b6bf9a264 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8043,6 +8043,43 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
report_fatal_error("EGW should be greater than or equal to 4 * SEW.");
return Op;
}
+ case Intrinsic::riscv_sf_vc_v_x:
+ case Intrinsic::riscv_sf_vc_v_i:
+ case Intrinsic::riscv_sf_vc_v_xv:
+ case Intrinsic::riscv_sf_vc_v_iv:
+ case Intrinsic::riscv_sf_vc_v_vv:
+ case Intrinsic::riscv_sf_vc_v_fv:
+ case Intrinsic::riscv_sf_vc_v_xvv:
+ case Intrinsic::riscv_sf_vc_v_ivv:
+ case Intrinsic::riscv_sf_vc_v_vvv:
+ case Intrinsic::riscv_sf_vc_v_fvv:
+ case Intrinsic::riscv_sf_vc_v_xvw:
+ case Intrinsic::riscv_sf_vc_v_ivw:
+ case Intrinsic::riscv_sf_vc_v_vvw:
+ case Intrinsic::riscv_sf_vc_v_fvw: {
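+ // Convert fixed-length vector operands to their scalable container types,
+ // emit the intrinsic on the containers, and convert the result back.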
+ MVT VT = Op.getSimpleValueType();
+
+ if (!VT.isFixedLengthVector())
+ break;
+
+ SmallVector<SDValue, 6> Ops;
+ for (const SDValue &V : Op->op_values()) {
+ // Pass through any operand that is not a fixed-length vector.
+ if (!V.getValueType().isFixedLengthVector()) {
+ Ops.push_back(V);
+ continue;
+ }
+
+ MVT OpContainerVT =
+ getContainerForFixedLengthVector(V.getSimpleValueType());
+ Ops.push_back(convertToScalableVector(OpContainerVT, V, DAG, Subtarget));
+ }
+
+ MVT RetContainerVT = getContainerForFixedLengthVector(VT);
+ SDValue Scalable =
+ DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, RetContainerVT, Ops);
+ return convertFromScalableVector(VT, Scalable, DAG, Subtarget);
+ }
}
return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
@@ -8163,6 +8200,46 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
Results.push_back(Result.getValue(NF));
return DAG.getMergeValues(Results, DL);
}
+ case Intrinsic::riscv_sf_vc_v_x_se:
+ case Intrinsic::riscv_sf_vc_v_i_se:
+ case Intrinsic::riscv_sf_vc_v_xv_se:
+ case Intrinsic::riscv_sf_vc_v_iv_se:
+ case Intrinsic::riscv_sf_vc_v_vv_se:
+ case Intrinsic::riscv_sf_vc_v_fv_se:
+ case Intrinsic::riscv_sf_vc_v_xvv_se:
+ case Intrinsic::riscv_sf_vc_v_ivv_se:
+ case Intrinsic::riscv_sf_vc_v_vvv_se:
+ case Intrinsic::riscv_sf_vc_v_fvv_se:
+ case Intrinsic::riscv_sf_vc_v_xvw_se:
+ case Intrinsic::riscv_sf_vc_v_ivw_se:
+ case Intrinsic::riscv_sf_vc_v_vvw_se:
+ case Intrinsic::riscv_sf_vc_v_fvw_se: {
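+ // Same fixed-to-scalable conversion as in LowerINTRINSIC_WO_CHAIN, but the
+ // chain is returned alongside the converted result.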
+ MVT VT = Op.getSimpleValueType();
+
+ if (!VT.isFixedLengthVector())
+ break;
+
+ SmallVector<SDValue, 6> Ops;
+ for (const SDValue &V : Op->op_values()) {
+ // Pass through any operand that is not a fixed-length vector.
+ if (!V.getValueType().isFixedLengthVector()) {
+ Ops.push_back(V);
+ continue;
+ }
+
+ MVT OpContainerVT =
+ getContainerForFixedLengthVector(V.getSimpleValueType());
+ Ops.push_back(convertToScalableVector(OpContainerVT, V, DAG, Subtarget));
+ }
+
+ SDLoc DL(Op);
+ MVT RetContainerVT = getContainerForFixedLengthVector(VT);
+ SDValue ScalableVector =
+ DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, RetContainerVT, Ops);
+ SDValue FixedVector =
+ convertFromScalableVector(VT, ScalableVector, DAG, Subtarget);
+ return DAG.getMergeValues({FixedVector, Op.getOperand(0)}, DL);
+ }
}
return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
@@ -8250,6 +8327,82 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Ops,
FixedIntrinsic->getMemoryVT(), FixedIntrinsic->getMemOperand());
}
+ case Intrinsic::riscv_sf_vc_x_se_e8mf8:
+ case Intrinsic::riscv_sf_vc_x_se_e8mf4:
+ case Intrinsic::riscv_sf_vc_x_se_e8mf2:
+ case Intrinsic::riscv_sf_vc_x_se_e8m1:
+ case Intrinsic::riscv_sf_vc_x_se_e8m2:
+ case Intrinsic::riscv_sf_vc_x_se_e8m4:
+ case Intrinsic::riscv_sf_vc_x_se_e8m8:
+ case Intrinsic::riscv_sf_vc_x_se_e16mf4:
+ case Intrinsic::riscv_sf_vc_x_se_e16mf2:
+ case Intrinsic::riscv_sf_vc_x_se_e16m1:
+ case Intrinsic::riscv_sf_vc_x_se_e16m2:
+ case Intrinsic::riscv_sf_vc_x_se_e16m4:
+ case Intrinsic::riscv_sf_vc_x_se_e16m8:
+ case Intrinsic::riscv_sf_vc_x_se_e32mf2:
+ case Intrinsic::riscv_sf_vc_x_se_e32m1:
+ case Intrinsic::riscv_sf_vc_x_se_e32m2:
+ case Intrinsic::riscv_sf_vc_x_se_e32m4:
+ case Intrinsic::riscv_sf_vc_x_se_e32m8:
+ case Intrinsic::riscv_sf_vc_x_se_e64m1:
+ case Intrinsic::riscv_sf_vc_x_se_e64m2:
+ case Intrinsic::riscv_sf_vc_x_se_e64m4:
+ case Intrinsic::riscv_sf_vc_x_se_e64m8:
+ case Intrinsic::riscv_sf_vc_i_se_e8mf8:
+ case Intrinsic::riscv_sf_vc_i_se_e8mf4:
+ case Intrinsic::riscv_sf_vc_i_se_e8mf2:
+ case Intrinsic::riscv_sf_vc_i_se_e8m1:
+ case Intrinsic::riscv_sf_vc_i_se_e8m2:
+ case Intrinsic::riscv_sf_vc_i_se_e8m4:
+ case Intrinsic::riscv_sf_vc_i_se_e8m8:
+ case Intrinsic::riscv_sf_vc_i_se_e16mf4:
+ case Intrinsic::riscv_sf_vc_i_se_e16mf2:
+ case Intrinsic::riscv_sf_vc_i_se_e16m1:
+ case Intrinsic::riscv_sf_vc_i_se_e16m2:
+ case Intrinsic::riscv_sf_vc_i_se_e16m4:
+ case Intrinsic::riscv_sf_vc_i_se_e16m8:
+ case Intrinsic::riscv_sf_vc_i_se_e32mf2:
+ case Intrinsic::riscv_sf_vc_i_se_e32m1:
+ case Intrinsic::riscv_sf_vc_i_se_e32m2:
+ case Intrinsic::riscv_sf_vc_i_se_e32m4:
+ case Intrinsic::riscv_sf_vc_i_se_e32m8:
+ case Intrinsic::riscv_sf_vc_i_se_e64m1:
+ case Intrinsic::riscv_sf_vc_i_se_e64m2:
+ case Intrinsic::riscv_sf_vc_i_se_e64m4:
+ case Intrinsic::riscv_sf_vc_i_se_e64m8:
+ case Intrinsic::riscv_sf_vc_xv_se:
+ case Intrinsic::riscv_sf_vc_iv_se:
+ case Intrinsic::riscv_sf_vc_vv_se:
+ case Intrinsic::riscv_sf_vc_fv_se:
+ case Intrinsic::riscv_sf_vc_xvv_se:
+ case Intrinsic::riscv_sf_vc_ivv_se:
+ case Intrinsic::riscv_sf_vc_vvv_se:
+ case Intrinsic::riscv_sf_vc_fvv_se:
+ case Intrinsic::riscv_sf_vc_xvw_se:
+ case Intrinsic::riscv_sf_vc_ivw_se:
+ case Intrinsic::riscv_sf_vc_vvw_se:
+ case Intrinsic::riscv_sf_vc_fvw_se: {
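+ // These forms return no value; only custom-lower when at least one operand
+ // is a fixed-length vector that needs conversion to its scalable container.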
+ if (!llvm::any_of(Op->op_values(), [&](const SDValue &V) {
+ return V.getValueType().isFixedLengthVector();
+ }))
+ break;
+
+ SmallVector<SDValue, 6> Ops;
+ for (const SDValue &V : Op->op_values()) {
+ // Pass through any operand that is not a fixed-length vector.
+ if (!V.getValueType().isFixedLengthVector()) {
+ Ops.push_back(V);
+ continue;
+ }
+
+ MVT OpContainerVT =
+ getContainerForFixedLengthVector(V.getSimpleValueType());
+ Ops.push_back(convertToScalableVector(OpContainerVT, V, DAG, Subtarget));
+ }
+
+ return DAG.getNode(ISD::INTRINSIC_VOID, SDLoc(Op), Op->getVTList(), Ops);
+ }
}
return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-x.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-x.ll
new file mode 100644
index 000000000000000..68b92f975a52d9f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-x.ll
@@ -0,0 +1,1565 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+xsfvcp \
+; RUN: -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvcp \
+; RUN: -verify-machineinstrs | FileCheck %s
+
+define void @test_sf_vc_x_se_e8mf8(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e8mf8.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e8mf8.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+
+define void @test_sf_vc_x_se_e8mf4(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e8mf4.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e8mf4.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+
+define void @test_sf_vc_x_se_e8mf2(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e8mf2.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e8mf2.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+
+define void @test_sf_vc_x_se_e8m1(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e8m1.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e8m1.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+
+define void @test_sf_vc_x_se_e8m2(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e8m2.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e8m2.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+
+define void @test_sf_vc_x_se_e8m4(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e8m4.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e8m4.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+
+define void @test_sf_vc_x_se_e8m8(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e8m8.iXLen.i8.iXLen(iXLen 3, iXLen 31, iXLen 31, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e8m8.iXLen.i8.iXLen(iXLen, iXLen, iXLen, i8, iXLen)
+
+define void @test_sf_vc_x_se_e16mf4(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e16mf4.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e16mf4.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen)
+
+define void @test_sf_vc_x_se_e16mf2(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e16mf2.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e16mf2.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen)
+
+define void @test_sf_vc_x_se_e16m1(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e16m1.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e16m1.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen)
+
+define void @test_sf_vc_x_se_e16m2(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e16m2.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e16m2.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen)
+
+define void @test_sf_vc_x_se_e16m4(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e16m4.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e16m4.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen)
+
+define void @test_sf_vc_x_se_e16m8(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e16m8.iXLen.i16.iXLen(iXLen 3, iXLen 31, iXLen 31, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e16m8.iXLen.i16.iXLen(iXLen, iXLen, iXLen, i16, iXLen)
+
+define void @test_sf_vc_x_se_e32mf2(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e32mf2.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e32mf2.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen)
+
+define void @test_sf_vc_x_se_e32m1(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e32m1.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e32m1.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen)
+
+define void @test_sf_vc_x_se_e32m2(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e32m2.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e32m2.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen)
+
+define void @test_sf_vc_x_se_e32m4(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e32m4.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e32m4.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen)
+
+define void @test_sf_vc_x_se_e32m8(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_x_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.x 3, 31, 31, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.x.se.e32m8.iXLen.i32.iXLen(iXLen 3, iXLen 31, iXLen 31, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.x.se.e32m8.iXLen.i32.iXLen(iXLen, iXLen, iXLen, i32, iXLen)
+
+define <1 x i8> @test_sf_vc_v_x_se_e8mf8(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i8> @llvm.riscv.sf.vc.v.x.se.nxv1i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <1 x i8> %0
+}
+
+declare <1 x i8> @llvm.riscv.sf.vc.v.x.se.nxv1i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <2 x i8> @test_sf_vc_v_x_se_e8mf4(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i8> @llvm.riscv.sf.vc.v.x.se.nxv2i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <2 x i8> %0
+}
+
+declare <2 x i8> @llvm.riscv.sf.vc.v.x.se.nxv2i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <4 x i8> @test_sf_vc_v_x_se_e8mf2(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i8> @llvm.riscv.sf.vc.v.x.se.nxv4i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <4 x i8> %0
+}
+
+declare <4 x i8> @llvm.riscv.sf.vc.v.x.se.nxv4i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <8 x i8> @test_sf_vc_v_x_se_e8m1(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i8> @llvm.riscv.sf.vc.v.x.se.nxv8i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <8 x i8> %0
+}
+
+declare <8 x i8> @llvm.riscv.sf.vc.v.x.se.nxv8i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <16 x i8> @test_sf_vc_v_x_se_e8m2(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i8> @llvm.riscv.sf.vc.v.x.se.nxv16i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <16 x i8> %0
+}
+
+declare <16 x i8> @llvm.riscv.sf.vc.v.x.se.nxv16i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <32 x i8> @test_sf_vc_v_x_se_e8m4(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i8> @llvm.riscv.sf.vc.v.x.se.nxv32i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <32 x i8> %0
+}
+
+declare <32 x i8> @llvm.riscv.sf.vc.v.x.se.nxv32i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <64 x i8> @test_sf_vc_v_x_se_e8m8(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <64 x i8> @llvm.riscv.sf.vc.v.x.se.nxv64i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <64 x i8> %0
+}
+
+declare <64 x i8> @llvm.riscv.sf.vc.v.x.se.nxv64i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <1 x i16> @test_sf_vc_v_x_se_e16mf4(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.x.se.nxv1i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.x.se.nxv1i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <2 x i16> @test_sf_vc_v_x_se_e16mf2(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.x.se.nxv2i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.x.se.nxv2i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <4 x i16> @test_sf_vc_v_x_se_e16m1(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.x.se.nxv4i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.x.se.nxv4i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <8 x i16> @test_sf_vc_v_x_se_e16m2(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.x.se.nxv8i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.x.se.nxv8i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <16 x i16> @test_sf_vc_v_x_se_e16m4(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.x.se.nxv16i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.x.se.nxv16i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <32 x i16> @test_sf_vc_v_x_se_e16m8(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.x.se.nxv32i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.x.se.nxv32i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <1 x i32> @test_sf_vc_v_x_se_e32mf2(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.x.se.nxv1i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.x.se.nxv1i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <2 x i32> @test_sf_vc_v_x_se_e32m1(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.x.se.nxv2i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.x.se.nxv2i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <4 x i32> @test_sf_vc_v_x_se_e32m2(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.x.se.nxv4i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.x.se.nxv4i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <8 x i32> @test_sf_vc_v_x_se_e32m4(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.x.se.nxv8i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.x.se.nxv8i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <16 x i32> @test_sf_vc_v_x_se_e32m8(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.x.se.nxv16i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.x.se.nxv16i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <1 x i8> @test_sf_vc_v_x_e8mf8(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i8> @llvm.riscv.sf.vc.v.x.nxv1i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <1 x i8> %0
+}
+
+declare <1 x i8> @llvm.riscv.sf.vc.v.x.nxv1i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <2 x i8> @test_sf_vc_v_x_e8mf4(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i8> @llvm.riscv.sf.vc.v.x.nxv2i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <2 x i8> %0
+}
+
+declare <2 x i8> @llvm.riscv.sf.vc.v.x.nxv2i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <4 x i8> @test_sf_vc_v_x_e8mf2(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i8> @llvm.riscv.sf.vc.v.x.nxv4i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <4 x i8> %0
+}
+
+declare <4 x i8> @llvm.riscv.sf.vc.v.x.nxv4i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <8 x i8> @test_sf_vc_v_x_e8m1(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i8> @llvm.riscv.sf.vc.v.x.nxv8i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <8 x i8> %0
+}
+
+declare <8 x i8> @llvm.riscv.sf.vc.v.x.nxv8i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <16 x i8> @test_sf_vc_v_x_e8m2(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i8> @llvm.riscv.sf.vc.v.x.nxv16i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <16 x i8> %0
+}
+
+declare <16 x i8> @llvm.riscv.sf.vc.v.x.nxv16i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <32 x i8> @test_sf_vc_v_x_e8m4(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i8> @llvm.riscv.sf.vc.v.x.nxv32i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <32 x i8> %0
+}
+
+declare <32 x i8> @llvm.riscv.sf.vc.v.x.nxv32i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <64 x i8> @test_sf_vc_v_x_e8m8(i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <64 x i8> @llvm.riscv.sf.vc.v.x.nxv64i8.iXLen.i8.iXLen(iXLen 3, iXLen 31, i8 %rs1, iXLen %vl)
+ ret <64 x i8> %0
+}
+
+declare <64 x i8> @llvm.riscv.sf.vc.v.x.nxv64i8.iXLen.i8.iXLen(iXLen, iXLen, i8, iXLen)
+
+define <1 x i16> @test_sf_vc_v_x_e16mf4(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.x.nxv1i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.x.nxv1i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <2 x i16> @test_sf_vc_v_x_e16mf2(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.x.nxv2i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.x.nxv2i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <4 x i16> @test_sf_vc_v_x_e16m1(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.x.nxv4i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.x.nxv4i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <8 x i16> @test_sf_vc_v_x_e16m2(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.x.nxv8i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.x.nxv8i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <16 x i16> @test_sf_vc_v_x_e16m4(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.x.nxv16i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.x.nxv16i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <32 x i16> @test_sf_vc_v_x_e16m8(i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.x.nxv32i16.iXLen.i16.iXLen(iXLen 3, iXLen 31, i16 %rs1, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.x.nxv32i16.iXLen.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <1 x i32> @test_sf_vc_v_x_e32mf2(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.x.nxv1i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.x.nxv1i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <2 x i32> @test_sf_vc_v_x_e32m1(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.x.nxv2i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.x.nxv2i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <4 x i32> @test_sf_vc_v_x_e32m2(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.x.nxv4i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.x.nxv4i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <8 x i32> @test_sf_vc_v_x_e32m4(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.x.nxv8i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.x.nxv8i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <16 x i32> @test_sf_vc_v_x_e32m8(i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_x_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.x 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.x.nxv16i32.iXLen.i32.iXLen(iXLen 3, iXLen 31, i32 %rs1, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.x.nxv16i32.iXLen.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define void @test_sf_vc_i_se_e8mf8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e8mf8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e8mf8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e8mf4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e8mf4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e8mf4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e8mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e8mf2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e8mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e8m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e8m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e8m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e8m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e8m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e8m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e8m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e8m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e8m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e8m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e8m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e8m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e16mf4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e16mf4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e16mf4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e16mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e16mf2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e16mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e16m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e16m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e16m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e16m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e16m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e16m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e16m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e16m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e16m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e16m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e16m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e16m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e32mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e32mf2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e32mf2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e32m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e32m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e32m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e32m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e32m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e32m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e32m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e32m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e32m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e32m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e32m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e32m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e64m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e64m1.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e64m1.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e64m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e64m2.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e64m2.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e64m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e64m4.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e64m4.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_vc_i_se_e64m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_i_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: sf.vc.i 3, 31, 31, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.i.se.e64m8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 31, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.i.se.e64m8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+
+define <1 x i8> @test_sf_vc_v_i_se_e8mf8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i8> @llvm.riscv.sf.vc.v.i.se.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <1 x i8> %0
+}
+
+declare <1 x i8> @llvm.riscv.sf.vc.v.i.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <2 x i8> @test_sf_vc_v_i_se_e8mf4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i8> @llvm.riscv.sf.vc.v.i.se.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <2 x i8> %0
+}
+
+declare <2 x i8> @llvm.riscv.sf.vc.v.i.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <4 x i8> @test_sf_vc_v_i_se_e8mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i8> @llvm.riscv.sf.vc.v.i.se.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <4 x i8> %0
+}
+
+declare <4 x i8> @llvm.riscv.sf.vc.v.i.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <8 x i8> @test_sf_vc_v_i_se_e8m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i8> @llvm.riscv.sf.vc.v.i.se.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <8 x i8> %0
+}
+
+declare <8 x i8> @llvm.riscv.sf.vc.v.i.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <16 x i8> @test_sf_vc_v_i_se_e8m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i8> @llvm.riscv.sf.vc.v.i.se.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <16 x i8> %0
+}
+
+declare <16 x i8> @llvm.riscv.sf.vc.v.i.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <32 x i8> @test_sf_vc_v_i_se_e8m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i8> @llvm.riscv.sf.vc.v.i.se.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <32 x i8> %0
+}
+
+declare <32 x i8> @llvm.riscv.sf.vc.v.i.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <64 x i8> @test_sf_vc_v_i_se_e8m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <64 x i8> @llvm.riscv.sf.vc.v.i.se.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <64 x i8> %0
+}
+
+declare <64 x i8> @llvm.riscv.sf.vc.v.i.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <1 x i16> @test_sf_vc_v_i_se_e16mf4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.i.se.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.i.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <2 x i16> @test_sf_vc_v_i_se_e16mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.i.se.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.i.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <4 x i16> @test_sf_vc_v_i_se_e16m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.i.se.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.i.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <8 x i16> @test_sf_vc_v_i_se_e16m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.i.se.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.i.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <16 x i16> @test_sf_vc_v_i_se_e16m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.i.se.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.i.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <32 x i16> @test_sf_vc_v_i_se_e16m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.i.se.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.i.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <1 x i32> @test_sf_vc_v_i_se_e32mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.i.se.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.i.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <2 x i32> @test_sf_vc_v_i_se_e32m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.i.se.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.i.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <4 x i32> @test_sf_vc_v_i_se_e32m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.i.se.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.i.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <8 x i32> @test_sf_vc_v_i_se_e32m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.i.se.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.i.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <16 x i32> @test_sf_vc_v_i_se_e32m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.i.se.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.i.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <1 x i64> @test_sf_vc_v_i_se_e64m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.i.se.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <1 x i64> %0
+}
+
+declare <1 x i64> @llvm.riscv.sf.vc.v.i.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <2 x i64> @test_sf_vc_v_i_se_e64m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.i.se.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.riscv.sf.vc.v.i.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <4 x i64> @test_sf_vc_v_i_se_e64m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.i.se.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.riscv.sf.vc.v.i.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <8 x i64> @test_sf_vc_v_i_se_e64m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.i.se.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.riscv.sf.vc.v.i.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <1 x i8> @test_sf_vc_v_i_e8mf8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i8> @llvm.riscv.sf.vc.v.i.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <1 x i8> %0
+}
+
+declare <1 x i8> @llvm.riscv.sf.vc.v.i.nxv1i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <2 x i8> @test_sf_vc_v_i_e8mf4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i8> @llvm.riscv.sf.vc.v.i.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <2 x i8> %0
+}
+
+declare <2 x i8> @llvm.riscv.sf.vc.v.i.nxv2i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <4 x i8> @test_sf_vc_v_i_e8mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i8> @llvm.riscv.sf.vc.v.i.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <4 x i8> %0
+}
+
+declare <4 x i8> @llvm.riscv.sf.vc.v.i.nxv4i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <8 x i8> @test_sf_vc_v_i_e8m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i8> @llvm.riscv.sf.vc.v.i.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <8 x i8> %0
+}
+
+declare <8 x i8> @llvm.riscv.sf.vc.v.i.nxv8i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <16 x i8> @test_sf_vc_v_i_e8m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i8> @llvm.riscv.sf.vc.v.i.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <16 x i8> %0
+}
+
+declare <16 x i8> @llvm.riscv.sf.vc.v.i.nxv16i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <32 x i8> @test_sf_vc_v_i_e8m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i8> @llvm.riscv.sf.vc.v.i.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <32 x i8> %0
+}
+
+declare <32 x i8> @llvm.riscv.sf.vc.v.i.nxv32i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <64 x i8> @test_sf_vc_v_i_e8m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <64 x i8> @llvm.riscv.sf.vc.v.i.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <64 x i8> %0
+}
+
+declare <64 x i8> @llvm.riscv.sf.vc.v.i.nxv64i8.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <1 x i16> @test_sf_vc_v_i_e16mf4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.i.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.i.nxv1i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <2 x i16> @test_sf_vc_v_i_e16mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.i.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.i.nxv2i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <4 x i16> @test_sf_vc_v_i_e16m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.i.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.i.nxv4i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <8 x i16> @test_sf_vc_v_i_e16m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.i.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.i.nxv8i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <16 x i16> @test_sf_vc_v_i_e16m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.i.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.i.nxv16i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <32 x i16> @test_sf_vc_v_i_e16m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.i.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.i.nxv32i16.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <1 x i32> @test_sf_vc_v_i_e32mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.i.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.i.nxv1i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <2 x i32> @test_sf_vc_v_i_e32m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.i.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.i.nxv2i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <4 x i32> @test_sf_vc_v_i_e32m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.i.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.i.nxv4i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <8 x i32> @test_sf_vc_v_i_e32m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.i.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.i.nxv8i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <16 x i32> @test_sf_vc_v_i_e32m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.i.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.i.nxv16i32.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <1 x i64> @test_sf_vc_v_i_e64m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.i.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <1 x i64> %0
+}
+
+declare <1 x i64> @llvm.riscv.sf.vc.v.i.nxv1i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <2 x i64> @test_sf_vc_v_i_e64m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.i.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.riscv.sf.vc.v.i.nxv2i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <4 x i64> @test_sf_vc_v_i_e64m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.i.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.riscv.sf.vc.v.i.nxv4i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <8 x i64> @test_sf_vc_v_i_e64m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_i_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.i 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.i.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, iXLen 31, iXLen 10, iXLen %vl)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.riscv.sf.vc.v.i.nxv8i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xv.ll
new file mode 100644
index 000000000000000..12a149de6a4dfce
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xv.ll
@@ -0,0 +1,3008 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+xsfvcp \
+; RUN: -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+xsfvcp \
+; RUN: -verify-machineinstrs | FileCheck %s
+
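+; These tests exercise the XSfvcp .vv/.xv intrinsics on fixed-length vector
+; types; the sed RUN lines above rewrite iXLen to i32 for riscv32 and to i64
+; for riscv64, so one file covers both targets.
+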
+define void @test_sf_vc_vv_se_e8mf8(<1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen 3, iXLen 31, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, iXLen, <1 x i8>, <1 x i8>, iXLen)
+
+define void @test_sf_vc_vv_se_e8mf4(<2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen 3, iXLen 31, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, iXLen, <2 x i8>, <2 x i8>, iXLen)
+
+define void @test_sf_vc_vv_se_e8mf2(<4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen 3, iXLen 31, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, iXLen, <4 x i8>, <4 x i8>, iXLen)
+
+define void @test_sf_vc_vv_se_e8m1(<8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen 3, iXLen 31, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, iXLen, <8 x i8>, <8 x i8>, iXLen)
+
+define void @test_sf_vc_vv_se_e8m2(<16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen 3, iXLen 31, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, iXLen, <16 x i8>, <16 x i8>, iXLen)
+
+define void @test_sf_vc_vv_se_e8m4(<32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen 3, iXLen 31, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, iXLen, <32 x i8>, <32 x i8>, iXLen)
+
+define void @test_sf_vc_vv_se_e8m8(<64 x i8> %vs2, <64 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen 3, iXLen 31, <64 x i8> %vs2, <64 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen, iXLen, <64 x i8>, <64 x i8>, iXLen)
+
+define void @test_sf_vc_vv_se_e16mf4(<1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen 3, iXLen 31, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, iXLen, <1 x i16>, <1 x i16>, iXLen)
+
+define void @test_sf_vc_vv_se_e16mf2(<2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen 3, iXLen 31, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, iXLen, <2 x i16>, <2 x i16>, iXLen)
+
+define void @test_sf_vc_vv_se_e16m1(<4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen 3, iXLen 31, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, iXLen, <4 x i16>, <4 x i16>, iXLen)
+
+define void @test_sf_vc_vv_se_e16m2(<8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen 3, iXLen 31, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, iXLen, <8 x i16>, <8 x i16>, iXLen)
+
+define void @test_sf_vc_vv_se_e16m4(<16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen 3, iXLen 31, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, iXLen, <16 x i16>, <16 x i16>, iXLen)
+
+define void @test_sf_vc_vv_se_e16m8(<32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen 3, iXLen 31, <32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen, iXLen, <32 x i16>, <32 x i16>, iXLen)
+
+define void @test_sf_vc_vv_se_e32mf2(<1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen 3, iXLen 31, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, iXLen, <1 x i32>, <1 x i32>, iXLen)
+
+define void @test_sf_vc_vv_se_e32m1(<2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen 3, iXLen 31, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, iXLen, <2 x i32>, <2 x i32>, iXLen)
+
+define void @test_sf_vc_vv_se_e32m2(<4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen 3, iXLen 31, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, iXLen, <4 x i32>, <4 x i32>, iXLen)
+
+define void @test_sf_vc_vv_se_e32m4(<8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen 3, iXLen 31, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, iXLen, <8 x i32>, <8 x i32>, iXLen)
+
+define void @test_sf_vc_vv_se_e32m8(<16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen 3, iXLen 31, <16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen, iXLen, <16 x i32>, <16 x i32>, iXLen)
+
+define void @test_sf_vc_vv_se_e64m1(<1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen 3, iXLen 31, <1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen, iXLen, <1 x i64>, <1 x i64>, iXLen)
+
+define void @test_sf_vc_vv_se_e64m2(<2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen 3, iXLen 31, <2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen, iXLen, <2 x i64>, <2 x i64>, iXLen)
+
+define void @test_sf_vc_vv_se_e64m4(<4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen 3, iXLen 31, <4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen, iXLen, <4 x i64>, <4 x i64>, iXLen)
+
+define void @test_sf_vc_vv_se_e64m8(<8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vv_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.vv 3, 31, v8, v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen 3, iXLen 31, <8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen, iXLen, <8 x i64>, <8 x i64>, iXLen)
+
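+; sf.vc.v.vv (.se): value-returning vector-vector form with the side-effect
+; suffix.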
+define <1 x i8> @test_sf_vc_v_vv_se_e8mf8(<1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen 3, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl)
+ ret <1 x i8> %0
+}
+
+declare <1 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, <1 x i8>, <1 x i8>, iXLen)
+
+define <2 x i8> @test_sf_vc_v_vv_se_e8mf4(<2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen 3, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl)
+ ret <2 x i8> %0
+}
+
+declare <2 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, <2 x i8>, <2 x i8>, iXLen)
+
+define <4 x i8> @test_sf_vc_v_vv_se_e8mf2(<4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen 3, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl)
+ ret <4 x i8> %0
+}
+
+declare <4 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, <4 x i8>, <4 x i8>, iXLen)
+
+define <8 x i8> @test_sf_vc_v_vv_se_e8m1(<8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen 3, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl)
+ ret <8 x i8> %0
+}
+
+declare <8 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, <8 x i8>, <8 x i8>, iXLen)
+
+define <16 x i8> @test_sf_vc_v_vv_se_e8m2(<16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen 3, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl)
+ ret <16 x i8> %0
+}
+
+declare <16 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, <16 x i8>, <16 x i8>, iXLen)
+
+define <32 x i8> @test_sf_vc_v_vv_se_e8m4(<32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen 3, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl)
+ ret <32 x i8> %0
+}
+
+declare <32 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, <32 x i8>, <32 x i8>, iXLen)
+
+define <64 x i8> @test_sf_vc_v_vv_se_e8m8(<64 x i8> %vs2, <64 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <64 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen 3, <64 x i8> %vs2, <64 x i8> %vs1, iXLen %vl)
+ ret <64 x i8> %0
+}
+
+declare <64 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, <64 x i8>, <64 x i8>, iXLen)
+
+define <1 x i16> @test_sf_vc_v_vv_se_e16mf4(<1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen 3, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, <1 x i16>, <1 x i16>, iXLen)
+
+define <2 x i16> @test_sf_vc_v_vv_se_e16mf2(<2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen 3, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, <2 x i16>, <2 x i16>, iXLen)
+
+define <4 x i16> @test_sf_vc_v_vv_se_e16m1(<4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen 3, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, <4 x i16>, <4 x i16>, iXLen)
+
+define <8 x i16> @test_sf_vc_v_vv_se_e16m2(<8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen 3, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, <8 x i16>, <8 x i16>, iXLen)
+
+define <16 x i16> @test_sf_vc_v_vv_se_e16m4(<16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen 3, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, <16 x i16>, <16 x i16>, iXLen)
+
+define <32 x i16> @test_sf_vc_v_vv_se_e16m8(<32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen 3, <32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, <32 x i16>, <32 x i16>, iXLen)
+
+define <1 x i32> @test_sf_vc_v_vv_se_e32mf2(<1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen 3, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, <1 x i32>, <1 x i32>, iXLen)
+
+define <2 x i32> @test_sf_vc_v_vv_se_e32m1(<2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen 3, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, <2 x i32>, <2 x i32>, iXLen)
+
+define <4 x i32> @test_sf_vc_v_vv_se_e32m2(<4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen 3, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, <4 x i32>, <4 x i32>, iXLen)
+
+define <8 x i32> @test_sf_vc_v_vv_se_e32m4(<8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen 3, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, <8 x i32>, <8 x i32>, iXLen)
+
+define <16 x i32> @test_sf_vc_v_vv_se_e32m8(<16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen 3, <16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, <16 x i32>, <16 x i32>, iXLen)
+
+define <1 x i64> @test_sf_vc_v_vv_se_e64m1(<1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen 3, <1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl)
+ ret <1 x i64> %0
+}
+
+declare <1 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, <1 x i64>, <1 x i64>, iXLen)
+
+define <2 x i64> @test_sf_vc_v_vv_se_e64m2(<2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen 3, <2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, <2 x i64>, <2 x i64>, iXLen)
+
+define <4 x i64> @test_sf_vc_v_vv_se_e64m4(<4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen 3, <4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, <4 x i64>, <4 x i64>, iXLen)
+
+define <8 x i64> @test_sf_vc_v_vv_se_e64m8(<8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen 3, <8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, <8 x i64>, <8 x i64>, iXLen)
+
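+; sf.vc.v.vv: value-returning vector-vector form without the .se suffix.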
+define <1 x i8> @test_sf_vc_v_vv_e8mf8(<1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i8> @llvm.riscv.sf.vc.v.vv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen 3, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl)
+ ret <1 x i8> %0
+}
+
+declare <1 x i8> @llvm.riscv.sf.vc.v.vv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, <1 x i8>, <1 x i8>, iXLen)
+
+define <2 x i8> @test_sf_vc_v_vv_e8mf4(<2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i8> @llvm.riscv.sf.vc.v.vv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen 3, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl)
+ ret <2 x i8> %0
+}
+
+declare <2 x i8> @llvm.riscv.sf.vc.v.vv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, <2 x i8>, <2 x i8>, iXLen)
+
+define <4 x i8> @test_sf_vc_v_vv_e8mf2(<4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i8> @llvm.riscv.sf.vc.v.vv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen 3, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl)
+ ret <4 x i8> %0
+}
+
+declare <4 x i8> @llvm.riscv.sf.vc.v.vv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, <4 x i8>, <4 x i8>, iXLen)
+
+define <8 x i8> @test_sf_vc_v_vv_e8m1(<8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i8> @llvm.riscv.sf.vc.v.vv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen 3, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl)
+ ret <8 x i8> %0
+}
+
+declare <8 x i8> @llvm.riscv.sf.vc.v.vv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, <8 x i8>, <8 x i8>, iXLen)
+
+define <16 x i8> @test_sf_vc_v_vv_e8m2(<16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i8> @llvm.riscv.sf.vc.v.vv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen 3, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl)
+ ret <16 x i8> %0
+}
+
+declare <16 x i8> @llvm.riscv.sf.vc.v.vv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, <16 x i8>, <16 x i8>, iXLen)
+
+define <32 x i8> @test_sf_vc_v_vv_e8m4(<32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i8> @llvm.riscv.sf.vc.v.vv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen 3, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl)
+ ret <32 x i8> %0
+}
+
+declare <32 x i8> @llvm.riscv.sf.vc.v.vv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, <32 x i8>, <32 x i8>, iXLen)
+
+define <64 x i8> @test_sf_vc_v_vv_e8m8(<64 x i8> %vs2, <64 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <64 x i8> @llvm.riscv.sf.vc.v.vv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen 3, <64 x i8> %vs2, <64 x i8> %vs1, iXLen %vl)
+ ret <64 x i8> %0
+}
+
+declare <64 x i8> @llvm.riscv.sf.vc.v.vv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, <64 x i8>, <64 x i8>, iXLen)
+
+define <1 x i16> @test_sf_vc_v_vv_e16mf4(<1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.vv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen 3, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.vv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, <1 x i16>, <1 x i16>, iXLen)
+
+define <2 x i16> @test_sf_vc_v_vv_e16mf2(<2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.vv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen 3, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.vv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, <2 x i16>, <2 x i16>, iXLen)
+
+define <4 x i16> @test_sf_vc_v_vv_e16m1(<4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.vv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen 3, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.vv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, <4 x i16>, <4 x i16>, iXLen)
+
+define <8 x i16> @test_sf_vc_v_vv_e16m2(<8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.vv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen 3, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.vv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, <8 x i16>, <8 x i16>, iXLen)
+
+define <16 x i16> @test_sf_vc_v_vv_e16m4(<16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.vv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen 3, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.vv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, <16 x i16>, <16 x i16>, iXLen)
+
+define <32 x i16> @test_sf_vc_v_vv_e16m8(<32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.vv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen 3, <32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.vv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, <32 x i16>, <32 x i16>, iXLen)
+
+define <1 x i32> @test_sf_vc_v_vv_e32mf2(<1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.vv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen 3, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.vv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, <1 x i32>, <1 x i32>, iXLen)
+
+define <2 x i32> @test_sf_vc_v_vv_e32m1(<2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.vv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen 3, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.vv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, <2 x i32>, <2 x i32>, iXLen)
+
+define <4 x i32> @test_sf_vc_v_vv_e32m2(<4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.vv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen 3, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.vv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, <4 x i32>, <4 x i32>, iXLen)
+
+define <8 x i32> @test_sf_vc_v_vv_e32m4(<8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.vv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen 3, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.vv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, <8 x i32>, <8 x i32>, iXLen)
+
+define <16 x i32> @test_sf_vc_v_vv_e32m8(<16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.vv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen 3, <16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.vv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, <16 x i32>, <16 x i32>, iXLen)
+
+define <1 x i64> @test_sf_vc_v_vv_e64m1(<1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.vv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen 3, <1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl)
+ ret <1 x i64> %0
+}
+
+declare <1 x i64> @llvm.riscv.sf.vc.v.vv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, <1 x i64>, <1 x i64>, iXLen)
+
+define <2 x i64> @test_sf_vc_v_vv_e64m2(<2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.vv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen 3, <2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.riscv.sf.vc.v.vv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, <2 x i64>, <2 x i64>, iXLen)
+
+define <4 x i64> @test_sf_vc_v_vv_e64m4(<4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.vv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen 3, <4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.riscv.sf.vc.v.vv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, <4 x i64>, <4 x i64>, iXLen)
+
+define <8 x i64> @test_sf_vc_v_vv_e64m8(<8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vv_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.vv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen 3, <8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.riscv.sf.vc.v.vv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, <8 x i64>, <8 x i64>, iXLen)
+
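+; sf.vc.xv (.se): vector-scalar form; the scalar %rs1 operand is passed in a
+; GPR (a0 below).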
+define void @test_sf_vc_xv_se_e8mf8(<1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i8.i8.iXLen(iXLen 3, iXLen 31, <1 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i8.i8.iXLen(iXLen, iXLen, <1 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e8mf4(<2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i8.i8.iXLen(iXLen 3, iXLen 31, <2 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i8.i8.iXLen(iXLen, iXLen, <2 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e8mf2(<4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i8.i8.iXLen(iXLen 3, iXLen 31, <4 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i8.i8.iXLen(iXLen, iXLen, <4 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e8m1(<8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i8.i8.iXLen(iXLen 3, iXLen 31, <8 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i8.i8.iXLen(iXLen, iXLen, <8 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e8m2(<16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i8.i8.iXLen(iXLen 3, iXLen 31, <16 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i8.i8.iXLen(iXLen, iXLen, <16 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e8m4(<32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32i8.i8.iXLen(iXLen 3, iXLen 31, <32 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32i8.i8.iXLen(iXLen, iXLen, <32 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e8m8(<64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv64i8.i8.iXLen(iXLen 3, iXLen 31, <64 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv64i8.i8.iXLen(iXLen, iXLen, <64 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xv_se_e16mf4(<1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i16.i16.iXLen(iXLen 3, iXLen 31, <1 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i16.i16.iXLen(iXLen, iXLen, <1 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xv_se_e16mf2(<2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i16.i16.iXLen(iXLen 3, iXLen 31, <2 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i16.i16.iXLen(iXLen, iXLen, <2 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xv_se_e16m1(<4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i16.i16.iXLen(iXLen 3, iXLen 31, <4 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i16.i16.iXLen(iXLen, iXLen, <4 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xv_se_e16m2(<8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i16.i16.iXLen(iXLen 3, iXLen 31, <8 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i16.i16.iXLen(iXLen, iXLen, <8 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xv_se_e16m4(<16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i16.i16.iXLen(iXLen 3, iXLen 31, <16 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i16.i16.iXLen(iXLen, iXLen, <16 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xv_se_e16m8(<32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32i16.i16.iXLen(iXLen 3, iXLen 31, <32 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32i16.i16.iXLen(iXLen, iXLen, <32 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xv_se_e32mf2(<1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i32.i32.iXLen(iXLen 3, iXLen 31, <1 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i32.i32.iXLen(iXLen, iXLen, <1 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xv_se_e32m1(<2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i32.i32.iXLen(iXLen 3, iXLen 31, <2 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i32.i32.iXLen(iXLen, iXLen, <2 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xv_se_e32m2(<4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i32.i32.iXLen(iXLen 3, iXLen 31, <4 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i32.i32.iXLen(iXLen, iXLen, <4 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xv_se_e32m4(<8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i32.i32.iXLen(iXLen 3, iXLen 31, <8 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i32.i32.iXLen(iXLen, iXLen, <8 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xv_se_e32m8(<16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i32.i32.iXLen(iXLen 3, iXLen 31, <16 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i32.i32.iXLen(iXLen, iXLen, <16 x i32>, i32, iXLen)
+
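+; sf.vc.v.xv (.se): value-returning vector-scalar form.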
+define <1 x i8> @test_sf_vc_v_xv_se_e8mf8(<1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv1i8.iXLen.i8.iXLen(iXLen 3, <1 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <1 x i8> %0
+}
+
+declare <1 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv1i8.iXLen.i8.iXLen(iXLen, <1 x i8>, i8, iXLen)
+
+define <2 x i8> @test_sf_vc_v_xv_se_e8mf4(<2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv2i8.iXLen.i8.iXLen(iXLen 3, <2 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <2 x i8> %0
+}
+
+declare <2 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv2i8.iXLen.i8.iXLen(iXLen, <2 x i8>, i8, iXLen)
+
+define <4 x i8> @test_sf_vc_v_xv_se_e8mf2(<4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv4i8.iXLen.i8.iXLen(iXLen 3, <4 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <4 x i8> %0
+}
+
+declare <4 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv4i8.iXLen.i8.iXLen(iXLen, <4 x i8>, i8, iXLen)
+
+define <8 x i8> @test_sf_vc_v_xv_se_e8m1(<8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv8i8.iXLen.i8.iXLen(iXLen 3, <8 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <8 x i8> %0
+}
+
+declare <8 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv8i8.iXLen.i8.iXLen(iXLen, <8 x i8>, i8, iXLen)
+
+define <16 x i8> @test_sf_vc_v_xv_se_e8m2(<16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv16i8.iXLen.i8.iXLen(iXLen 3, <16 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <16 x i8> %0
+}
+
+declare <16 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv16i8.iXLen.i8.iXLen(iXLen, <16 x i8>, i8, iXLen)
+
+define <32 x i8> @test_sf_vc_v_xv_se_e8m4(<32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv32i8.iXLen.i8.iXLen(iXLen 3, <32 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <32 x i8> %0
+}
+
+declare <32 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv32i8.iXLen.i8.iXLen(iXLen, <32 x i8>, i8, iXLen)
+
+define <64 x i8> @test_sf_vc_v_xv_se_e8m8(<64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <64 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv64i8.iXLen.i8.iXLen(iXLen 3, <64 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <64 x i8> %0
+}
+
+declare <64 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv64i8.iXLen.i8.iXLen(iXLen, <64 x i8>, i8, iXLen)
+
+define <1 x i16> @test_sf_vc_v_xv_se_e16mf4(<1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv1i16.iXLen.i16.iXLen(iXLen 3, <1 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv1i16.iXLen.i16.iXLen(iXLen, <1 x i16>, i16, iXLen)
+
+define <2 x i16> @test_sf_vc_v_xv_se_e16mf2(<2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv2i16.iXLen.i16.iXLen(iXLen 3, <2 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv2i16.iXLen.i16.iXLen(iXLen, <2 x i16>, i16, iXLen)
+
+define <4 x i16> @test_sf_vc_v_xv_se_e16m1(<4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv4i16.iXLen.i16.iXLen(iXLen 3, <4 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv4i16.iXLen.i16.iXLen(iXLen, <4 x i16>, i16, iXLen)
+
+define <8 x i16> @test_sf_vc_v_xv_se_e16m2(<8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv8i16.iXLen.i16.iXLen(iXLen 3, <8 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv8i16.iXLen.i16.iXLen(iXLen, <8 x i16>, i16, iXLen)
+
+define <16 x i16> @test_sf_vc_v_xv_se_e16m4(<16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv16i16.iXLen.i16.iXLen(iXLen 3, <16 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv16i16.iXLen.i16.iXLen(iXLen, <16 x i16>, i16, iXLen)
+
+define <32 x i16> @test_sf_vc_v_xv_se_e16m8(<32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv32i16.iXLen.i16.iXLen(iXLen 3, <32 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv32i16.iXLen.i16.iXLen(iXLen, <32 x i16>, i16, iXLen)
+
+define <1 x i32> @test_sf_vc_v_xv_se_e32mf2(<1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv1i32.i32.i32.iXLen(iXLen 3, <1 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv1i32.i32.i32.iXLen(iXLen, <1 x i32>, i32, iXLen)
+
+define <2 x i32> @test_sf_vc_v_xv_se_e32m1(<2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv2i32.i32.i32.iXLen(iXLen 3, <2 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv2i32.i32.i32.iXLen(iXLen, <2 x i32>, i32, iXLen)
+
+define <4 x i32> @test_sf_vc_v_xv_se_e32m2(<4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i32.i32.iXLen(iXLen 3, <4 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i32.i32.iXLen(iXLen, <4 x i32>, i32, iXLen)
+
+define <8 x i32> @test_sf_vc_v_xv_se_e32m4(<8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv8i32.i32.i32.iXLen(iXLen 3, <8 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv8i32.i32.i32.iXLen(iXLen, <8 x i32>, i32, iXLen)
+
+define <16 x i32> @test_sf_vc_v_xv_se_e32m8(<16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv16i32.i32.i32.iXLen(iXLen 3, <16 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv16i32.i32.i32.iXLen(iXLen, <16 x i32>, i32, iXLen)
+
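+; The tests below exercise the non-".se" (side-effect-free) variants of
+; sf.vc.v.xv; the expected codegen matches the ".se" tests above. Note the
+; vsetvli LMUL is one step smaller than the LMUL in the test name (e.g. the
+; *_e8m1 test checks "e8, mf2"). Assuming the usual
+; -riscv-v-vector-bits-min=128 RUN configuration for fixed-vectors tests,
+; each fixed-length type is mapped onto its minimal scalable container; a
+; sketch for the e8m1 case:
+;   %cont = <8 x i8> %vs2 inserted into <vscale x 4 x i8> at index 0  ; free
+;   %res  = sf.vc.v.xv on <vscale x 4 x i8> %cont                     ; e8, mf2
+;   %0    = <8 x i8> extracted from %res at index 0                   ; free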
+define <1 x i8> @test_sf_vc_v_xv_e8mf8(<1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i8> @llvm.riscv.sf.vc.v.xv.nxv1i8.iXLen.i8.iXLen(iXLen 3, <1 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <1 x i8> %0
+}
+
+declare <1 x i8> @llvm.riscv.sf.vc.v.xv.nxv1i8.iXLen.i8.iXLen(iXLen, <1 x i8>, i8, iXLen)
+
+define <2 x i8> @test_sf_vc_v_xv_e8mf4(<2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i8> @llvm.riscv.sf.vc.v.xv.nxv2i8.iXLen.i8.iXLen(iXLen 3, <2 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <2 x i8> %0
+}
+
+declare <2 x i8> @llvm.riscv.sf.vc.v.xv.nxv2i8.iXLen.i8.iXLen(iXLen, <2 x i8>, i8, iXLen)
+
+define <4 x i8> @test_sf_vc_v_xv_e8mf2(<4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i8> @llvm.riscv.sf.vc.v.xv.nxv4i8.iXLen.i8.iXLen(iXLen 3, <4 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <4 x i8> %0
+}
+
+declare <4 x i8> @llvm.riscv.sf.vc.v.xv.nxv4i8.iXLen.i8.iXLen(iXLen, <4 x i8>, i8, iXLen)
+
+define <8 x i8> @test_sf_vc_v_xv_e8m1(<8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i8> @llvm.riscv.sf.vc.v.xv.nxv8i8.iXLen.i8.iXLen(iXLen 3, <8 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <8 x i8> %0
+}
+
+declare <8 x i8> @llvm.riscv.sf.vc.v.xv.nxv8i8.iXLen.i8.iXLen(iXLen, <8 x i8>, i8, iXLen)
+
+define <16 x i8> @test_sf_vc_v_xv_e8m2(<16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i8> @llvm.riscv.sf.vc.v.xv.nxv16i8.iXLen.i8.iXLen(iXLen 3, <16 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <16 x i8> %0
+}
+
+declare <16 x i8> @llvm.riscv.sf.vc.v.xv.nxv16i8.iXLen.i8.iXLen(iXLen, <16 x i8>, i8, iXLen)
+
+define <32 x i8> @test_sf_vc_v_xv_e8m4(<32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i8> @llvm.riscv.sf.vc.v.xv.nxv32i8.iXLen.i8.iXLen(iXLen 3, <32 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <32 x i8> %0
+}
+
+declare <32 x i8> @llvm.riscv.sf.vc.v.xv.nxv32i8.iXLen.i8.iXLen(iXLen, <32 x i8>, i8, iXLen)
+
+define <64 x i8> @test_sf_vc_v_xv_e8m8(<64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <64 x i8> @llvm.riscv.sf.vc.v.xv.nxv64i8.iXLen.i8.iXLen(iXLen 3, <64 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <64 x i8> %0
+}
+
+declare <64 x i8> @llvm.riscv.sf.vc.v.xv.nxv64i8.iXLen.i8.iXLen(iXLen, <64 x i8>, i8, iXLen)
+
+define <1 x i16> @test_sf_vc_v_xv_e16mf4(<1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.xv.nxv1i16.iXLen.i16.iXLen(iXLen 3, <1 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.xv.nxv1i16.iXLen.i16.iXLen(iXLen, <1 x i16>, i16, iXLen)
+
+define <2 x i16> @test_sf_vc_v_xv_e16mf2(<2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.xv.nxv2i16.iXLen.i16.iXLen(iXLen 3, <2 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.xv.nxv2i16.iXLen.i16.iXLen(iXLen, <2 x i16>, i16, iXLen)
+
+define <4 x i16> @test_sf_vc_v_xv_e16m1(<4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.xv.nxv4i16.iXLen.i16.iXLen(iXLen 3, <4 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.xv.nxv4i16.iXLen.i16.iXLen(iXLen, <4 x i16>, i16, iXLen)
+
+define <8 x i16> @test_sf_vc_v_xv_e16m2(<8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.xv.nxv8i16.iXLen.i16.iXLen(iXLen 3, <8 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.xv.nxv8i16.iXLen.i16.iXLen(iXLen, <8 x i16>, i16, iXLen)
+
+define <16 x i16> @test_sf_vc_v_xv_e16m4(<16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.xv.nxv16i16.iXLen.i16.iXLen(iXLen 3, <16 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.xv.nxv16i16.iXLen.i16.iXLen(iXLen, <16 x i16>, i16, iXLen)
+
+define <32 x i16> @test_sf_vc_v_xv_e16m8(<32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.xv.nxv32i16.iXLen.i16.iXLen(iXLen 3, <32 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.xv.nxv32i16.iXLen.i16.iXLen(iXLen, <32 x i16>, i16, iXLen)
+
+define <1 x i32> @test_sf_vc_v_xv_e32mf2(<1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.xv.nxv1i32.i32.i32.iXLen(iXLen 3, <1 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.xv.nxv1i32.i32.i32.iXLen(iXLen, <1 x i32>, i32, iXLen)
+
+define <2 x i32> @test_sf_vc_v_xv_e32m1(<2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.xv.nxv2i32.i32.i32.iXLen(iXLen 3, <2 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.xv.nxv2i32.i32.i32.iXLen(iXLen, <2 x i32>, i32, iXLen)
+
+define <4 x i32> @test_sf_vc_v_xv_e32m2(<4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.xv.nxv4i32.i32.i32.iXLen(iXLen 3, <4 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.xv.nxv4i32.i32.i32.iXLen(iXLen, <4 x i32>, i32, iXLen)
+
+define <8 x i32> @test_sf_vc_v_xv_e32m4(<8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.xv.nxv8i32.i32.i32.iXLen(iXLen 3, <8 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.xv.nxv8i32.i32.i32.iXLen(iXLen, <8 x i32>, i32, iXLen)
+
+define <16 x i32> @test_sf_vc_v_xv_e32m8(<16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xv_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.xv.nxv16i32.i32.i32.iXLen(iXLen 3, <16 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.xv.nxv16i32.i32.i32.iXLen(iXLen, <16 x i32>, i32, iXLen)
+
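+; sf.vc.iv: all scalar operands are immediates (3, 31 and 10 below), so the
+; only live GPR is a0, which carries the VL consumed by the vsetvli; the
+; vector operand is container-converted exactly as in the xv tests above.
+; These ".se" forms return void, so only the fixed-length source needs
+; wrapping.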
+define void @test_sf_vc_iv_se_e8mf8(<1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen 3, iXLen 31, <1 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen, iXLen, <1 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e8mf4(<2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen 3, iXLen 31, <2 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen, iXLen, <2 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e8mf2(<4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen 3, iXLen 31, <4 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen, iXLen, <4 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e8m1(<8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen 3, iXLen 31, <8 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen, iXLen, <8 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e8m2(<16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen 3, iXLen 31, <16 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen, iXLen, <16 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e8m4(<32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen 3, iXLen 31, <32 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen, iXLen, <32 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e8m8(<64 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen 3, iXLen 31, <64 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen, iXLen, <64 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e16mf4(<1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen 3, iXLen 31, <1 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen, iXLen, <1 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e16mf2(<2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen 3, iXLen 31, <2 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen, iXLen, <2 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e16m1(<4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen 3, iXLen 31, <4 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen, iXLen, <4 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e16m2(<8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen 3, iXLen 31, <8 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen, iXLen, <8 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e16m4(<16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen 3, iXLen 31, <16 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen, iXLen, <16 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e16m8(<32 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen 3, iXLen 31, <32 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen, iXLen, <32 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e32mf2(<1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen 3, iXLen 31, <1 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen, iXLen, <1 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e32m1(<2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen 3, iXLen 31, <2 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen, iXLen, <2 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e32m2(<4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen 3, iXLen 31, <4 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen, iXLen, <4 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e32m4(<8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen 3, iXLen 31, <8 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen, iXLen, <8 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e32m8(<16 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen 3, iXLen 31, <16 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen, iXLen, <16 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e64m1(<1 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen 3, iXLen 31, <1 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen, iXLen, <1 x i64>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e64m2(<2 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen 3, iXLen 31, <2 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen, iXLen, <2 x i64>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e64m4(<4 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen 3, iXLen 31, <4 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen, iXLen, <4 x i64>, iXLen, iXLen)
+
+define void @test_sf_vc_iv_se_e64m8(<8 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_iv_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen 3, iXLen 31, <8 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen, iXLen, <8 x i64>, iXLen, iXLen)
+
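+; sf.vc.v.iv.se: the immediate form that also produces a result. Converting
+; a fixed-length value to and from its scalable container is free at the
+; register level, so v8 serves as both source and destination in every case
+; below.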
+define <1 x i8> @test_sf_vc_v_iv_se_e8mf8(<1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, <1 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <1 x i8> %0
+}
+
+declare <1 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, <1 x i8>, iXLen, iXLen)
+
+define <2 x i8> @test_sf_vc_v_iv_se_e8mf4(<2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, <2 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <2 x i8> %0
+}
+
+declare <2 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, <2 x i8>, iXLen, iXLen)
+
+define <4 x i8> @test_sf_vc_v_iv_se_e8mf2(<4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, <4 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <4 x i8> %0
+}
+
+declare <4 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, <4 x i8>, iXLen, iXLen)
+
+define <8 x i8> @test_sf_vc_v_iv_se_e8m1(<8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, <8 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <8 x i8> %0
+}
+
+declare <8 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, <8 x i8>, iXLen, iXLen)
+
+define <16 x i8> @test_sf_vc_v_iv_se_e8m2(<16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, <16 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <16 x i8> %0
+}
+
+declare <16 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, <16 x i8>, iXLen, iXLen)
+
+define <32 x i8> @test_sf_vc_v_iv_se_e8m4(<32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, <32 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <32 x i8> %0
+}
+
+declare <32 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, <32 x i8>, iXLen, iXLen)
+
+define <64 x i8> @test_sf_vc_v_iv_se_e8m8(<64 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <64 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, <64 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <64 x i8> %0
+}
+
+declare <64 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, <64 x i8>, iXLen, iXLen)
+
+define <1 x i16> @test_sf_vc_v_iv_se_e16mf4(<1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, <1 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, <1 x i16>, iXLen, iXLen)
+
+define <2 x i16> @test_sf_vc_v_iv_se_e16mf2(<2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, <2 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, <2 x i16>, iXLen, iXLen)
+
+define <4 x i16> @test_sf_vc_v_iv_se_e16m1(<4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, <4 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, <4 x i16>, iXLen, iXLen)
+
+define <8 x i16> @test_sf_vc_v_iv_se_e16m2(<8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, <8 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, <8 x i16>, iXLen, iXLen)
+
+define <16 x i16> @test_sf_vc_v_iv_se_e16m4(<16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, <16 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, <16 x i16>, iXLen, iXLen)
+
+define <32 x i16> @test_sf_vc_v_iv_se_e16m8(<32 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, <32 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, <32 x i16>, iXLen, iXLen)
+
+define <1 x i32> @test_sf_vc_v_iv_se_e32mf2(<1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, <1 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, <1 x i32>, iXLen, iXLen)
+
+define <2 x i32> @test_sf_vc_v_iv_se_e32m1(<2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, <2 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, <2 x i32>, iXLen, iXLen)
+
+define <4 x i32> @test_sf_vc_v_iv_se_e32m2(<4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, <4 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, <4 x i32>, iXLen, iXLen)
+
+define <8 x i32> @test_sf_vc_v_iv_se_e32m4(<8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, <8 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, <8 x i32>, iXLen, iXLen)
+
+define <16 x i32> @test_sf_vc_v_iv_se_e32m8(<16 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, <16 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, <16 x i32>, iXLen, iXLen)
+
+define <1 x i64> @test_sf_vc_v_iv_se_e64m1(<1 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, <1 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <1 x i64> %0
+}
+
+declare <1 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, <1 x i64>, iXLen, iXLen)
+
+define <2 x i64> @test_sf_vc_v_iv_se_e64m2(<2 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, <2 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, <2 x i64>, iXLen, iXLen)
+
+define <4 x i64> @test_sf_vc_v_iv_se_e64m4(<4 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, <4 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, <4 x i64>, iXLen, iXLen)
+
+define <8 x i64> @test_sf_vc_v_iv_se_e64m8(<8 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, <8 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, <8 x i64>, iXLen, iXLen)
+
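+; Non-".se" counterparts of the sf.vc.v.iv.se tests above; the CHECK bodies
+; are identical, the two groups differing only in whether the intrinsic is
+; modeled as having side effects.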
+define <1 x i8> @test_sf_vc_v_iv_e8mf8(<1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i8> @llvm.riscv.sf.vc.v.iv.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, <1 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <1 x i8> %0
+}
+
+declare <1 x i8> @llvm.riscv.sf.vc.v.iv.nxv1i8.iXLen.iXLen.iXLen(iXLen, <1 x i8>, iXLen, iXLen)
+
+define <2 x i8> @test_sf_vc_v_iv_e8mf4(<2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i8> @llvm.riscv.sf.vc.v.iv.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, <2 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <2 x i8> %0
+}
+
+declare <2 x i8> @llvm.riscv.sf.vc.v.iv.nxv2i8.iXLen.iXLen.iXLen(iXLen, <2 x i8>, iXLen, iXLen)
+
+define <4 x i8> @test_sf_vc_v_iv_e8mf2(<4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i8> @llvm.riscv.sf.vc.v.iv.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, <4 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <4 x i8> %0
+}
+
+declare <4 x i8> @llvm.riscv.sf.vc.v.iv.nxv4i8.iXLen.iXLen.iXLen(iXLen, <4 x i8>, iXLen, iXLen)
+
+define <8 x i8> @test_sf_vc_v_iv_e8m1(<8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i8> @llvm.riscv.sf.vc.v.iv.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, <8 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <8 x i8> %0
+}
+
+declare <8 x i8> @llvm.riscv.sf.vc.v.iv.nxv8i8.iXLen.iXLen.iXLen(iXLen, <8 x i8>, iXLen, iXLen)
+
+define <16 x i8> @test_sf_vc_v_iv_e8m2(<16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i8> @llvm.riscv.sf.vc.v.iv.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, <16 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <16 x i8> %0
+}
+
+declare <16 x i8> @llvm.riscv.sf.vc.v.iv.nxv16i8.iXLen.iXLen.iXLen(iXLen, <16 x i8>, iXLen, iXLen)
+
+define <32 x i8> @test_sf_vc_v_iv_e8m4(<32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i8> @llvm.riscv.sf.vc.v.iv.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, <32 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <32 x i8> %0
+}
+
+declare <32 x i8> @llvm.riscv.sf.vc.v.iv.nxv32i8.iXLen.iXLen.iXLen(iXLen, <32 x i8>, iXLen, iXLen)
+
+define <64 x i8> @test_sf_vc_v_iv_e8m8(<64 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <64 x i8> @llvm.riscv.sf.vc.v.iv.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, <64 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <64 x i8> %0
+}
+
+declare <64 x i8> @llvm.riscv.sf.vc.v.iv.nxv64i8.iXLen.iXLen.iXLen(iXLen, <64 x i8>, iXLen, iXLen)
+
+define <1 x i16> @test_sf_vc_v_iv_e16mf4(<1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.iv.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, <1 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.iv.nxv1i16.iXLen.iXLen.iXLen(iXLen, <1 x i16>, iXLen, iXLen)
+
+define <2 x i16> @test_sf_vc_v_iv_e16mf2(<2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.iv.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, <2 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.iv.nxv2i16.iXLen.iXLen.iXLen(iXLen, <2 x i16>, iXLen, iXLen)
+
+define <4 x i16> @test_sf_vc_v_iv_e16m1(<4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.iv.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, <4 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.iv.nxv4i16.iXLen.iXLen.iXLen(iXLen, <4 x i16>, iXLen, iXLen)
+
+define <8 x i16> @test_sf_vc_v_iv_e16m2(<8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.iv.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, <8 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.iv.nxv8i16.iXLen.iXLen.iXLen(iXLen, <8 x i16>, iXLen, iXLen)
+
+define <16 x i16> @test_sf_vc_v_iv_e16m4(<16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.iv.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, <16 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.iv.nxv16i16.iXLen.iXLen.iXLen(iXLen, <16 x i16>, iXLen, iXLen)
+
+define <32 x i16> @test_sf_vc_v_iv_e16m8(<32 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.iv.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, <32 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.iv.nxv32i16.iXLen.iXLen.iXLen(iXLen, <32 x i16>, iXLen, iXLen)
+
+define <1 x i32> @test_sf_vc_v_iv_e32mf2(<1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.iv.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, <1 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.iv.nxv1i32.iXLen.iXLen.iXLen(iXLen, <1 x i32>, iXLen, iXLen)
+
+define <2 x i32> @test_sf_vc_v_iv_e32m1(<2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.iv.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, <2 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.iv.nxv2i32.iXLen.iXLen.iXLen(iXLen, <2 x i32>, iXLen, iXLen)
+
+define <4 x i32> @test_sf_vc_v_iv_e32m2(<4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.iv.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, <4 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.iv.nxv4i32.iXLen.iXLen.iXLen(iXLen, <4 x i32>, iXLen, iXLen)
+
+define <8 x i32> @test_sf_vc_v_iv_e32m4(<8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.iv.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, <8 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.iv.nxv8i32.iXLen.iXLen.iXLen(iXLen, <8 x i32>, iXLen, iXLen)
+
+define <16 x i32> @test_sf_vc_v_iv_e32m8(<16 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.iv.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, <16 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.iv.nxv16i32.iXLen.iXLen.iXLen(iXLen, <16 x i32>, iXLen, iXLen)
+
+define <1 x i64> @test_sf_vc_v_iv_e64m1(<1 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.iv.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, <1 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <1 x i64> %0
+}
+
+declare <1 x i64> @llvm.riscv.sf.vc.v.iv.nxv1i64.iXLen.iXLen.iXLen(iXLen, <1 x i64>, iXLen, iXLen)
+
+define <2 x i64> @test_sf_vc_v_iv_e64m2(<2 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.iv.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, <2 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.riscv.sf.vc.v.iv.nxv2i64.iXLen.iXLen.iXLen(iXLen, <2 x i64>, iXLen, iXLen)
+
+define <4 x i64> @test_sf_vc_v_iv_e64m4(<4 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.iv.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, <4 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.riscv.sf.vc.v.iv.nxv4i64.iXLen.iXLen.iXLen(iXLen, <4 x i64>, iXLen, iXLen)
+
+define <8 x i64> @test_sf_vc_v_iv_e64m8(<8 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_iv_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.iv.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, <8 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.riscv.sf.vc.v.iv.nxv8i64.iXLen.iXLen.iXLen(iXLen, <8 x i64>, iXLen, iXLen)
+
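+; sf.vc.fv: the scalar operand is a floating-point value passed in fa0,
+; with the FP type chosen to match the SEW of the integer vector operand
+; (half for e16, float for e32, double for e64).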
+define void @test_sf_vc_fv_se_e16mf4(<1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i16.f16.iXLen(iXLen 1, iXLen 31, <1 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i16.f16.iXLen(iXLen, iXLen, <1 x i16>, half, iXLen)
+
+define void @test_sf_vc_fv_se_e16mf2(<2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i16.f16.iXLen(iXLen 1, iXLen 31, <2 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i16.f16.iXLen(iXLen, iXLen, <2 x i16>, half, iXLen)
+
+define void @test_sf_vc_fv_se_e16m1(<4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i16.f16.iXLen(iXLen 1, iXLen 31, <4 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i16.f16.iXLen(iXLen, iXLen, <4 x i16>, half, iXLen)
+
+define void @test_sf_vc_fv_se_e16m2(<8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i16.f16.iXLen(iXLen 1, iXLen 31, <8 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i16.f16.iXLen(iXLen, iXLen, <8 x i16>, half, iXLen)
+
+define void @test_sf_vc_fv_se_e16m4(<16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16i16.f16.iXLen(iXLen 1, iXLen 31, <16 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16i16.f16.iXLen(iXLen, iXLen, <16 x i16>, half, iXLen)
+
+define void @test_sf_vc_fv_se_e16m8(<32 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32i16.f16.iXLen(iXLen 1, iXLen 31, <32 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32i16.f16.iXLen(iXLen, iXLen, <32 x i16>, half, iXLen)
+
+define void @test_sf_vc_fv_se_e32mf2(<1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i32.f32.iXLen(iXLen 1, iXLen 31, <1 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i32.f32.iXLen(iXLen, iXLen, <1 x i32>, float, iXLen)
+
+define void @test_sf_vc_fv_se_e32m1(<2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i32.f32.iXLen(iXLen 1, iXLen 31, <2 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i32.f32.iXLen(iXLen, iXLen, <2 x i32>, float, iXLen)
+
+define void @test_sf_vc_fv_se_e32m2(<4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i32.f32.iXLen(iXLen 1, iXLen 31, <4 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i32.f32.iXLen(iXLen, iXLen, <4 x i32>, float, iXLen)
+
+define void @test_sf_vc_fv_se_e32m4(<8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i32.f32.iXLen(iXLen 1, iXLen 31, <8 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i32.f32.iXLen(iXLen, iXLen, <8 x i32>, float, iXLen)
+
+define void @test_sf_vc_fv_se_e32m8(<16 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16i32.f32.iXLen(iXLen 1, iXLen 31, <16 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16i32.f32.iXLen(iXLen, iXLen, <16 x i32>, float, iXLen)
+
+define void @test_sf_vc_fv_se_e64m1(<1 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i64.f64.iXLen(iXLen 1, iXLen 31, <1 x i64> %vs2, double %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i64.f64.iXLen(iXLen, iXLen, <1 x i64>, double, iXLen)
+
+define void @test_sf_vc_fv_se_e64m2(<2 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i64.f64.iXLen(iXLen 1, iXLen 31, <2 x i64> %vs2, double %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i64.f64.iXLen(iXLen, iXLen, <2 x i64>, double, iXLen)
+
+define void @test_sf_vc_fv_se_e64m4(<4 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i64.f64.iXLen(iXLen 1, iXLen 31, <4 x i64> %vs2, double %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i64.f64.iXLen(iXLen, iXLen, <4 x i64>, double, iXLen)
+
+define void @test_sf_vc_fv_se_e64m8(<8 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i64.f64.iXLen(iXLen 1, iXLen 31, <8 x i64> %vs2, double %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i64.f64.iXLen(iXLen, iXLen, <8 x i64>, double, iXLen)
+
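+; sf.vc.v.fv.se: the FP-scalar form that returns a vector; as before, the
+; fixed-length result comes back in the same register (v8) that held the
+; container-converted source.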
+define <1 x i16> @test_sf_vc_v_fv_se_e16mf4(<1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv1i16.iXLen.f16.iXLen(iXLen 1, <1 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv1i16.iXLen.f16.iXLen(iXLen, <1 x i16>, half, iXLen)
+
+define <2 x i16> @test_sf_vc_v_fv_se_e16mf2(<2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv2i16.iXLen.f16.iXLen(iXLen 1, <2 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv2i16.iXLen.f16.iXLen(iXLen, <2 x i16>, half, iXLen)
+
+define <4 x i16> @test_sf_vc_v_fv_se_e16m1(<4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv4i16.iXLen.f16.iXLen(iXLen 1, <4 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv4i16.iXLen.f16.iXLen(iXLen, <4 x i16>, half, iXLen)
+
+define <8 x i16> @test_sf_vc_v_fv_se_e16m2(<8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv8i16.iXLen.f16.iXLen(iXLen 1, <8 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv8i16.iXLen.f16.iXLen(iXLen, <8 x i16>, half, iXLen)
+
+define <16 x i16> @test_sf_vc_v_fv_se_e16m4(<16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv16i16.iXLen.f16.iXLen(iXLen 1, <16 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv16i16.iXLen.f16.iXLen(iXLen, <16 x i16>, half, iXLen)
+
+define <32 x i16> @test_sf_vc_v_fv_se_e16m8(<32 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv32i16.iXLen.f16.iXLen(iXLen 1, <32 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv32i16.iXLen.f16.iXLen(iXLen, <32 x i16>, half, iXLen)
+
+define <1 x i32> @test_sf_vc_v_fv_se_e32mf2(<1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv1i32.iXLen.f32.iXLen(iXLen 1, <1 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv1i32.iXLen.f32.iXLen(iXLen, <1 x i32>, float, iXLen)
+
+define <2 x i32> @test_sf_vc_v_fv_se_e32m1(<2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv2i32.iXLen.f32.iXLen(iXLen 1, <2 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv2i32.iXLen.f32.iXLen(iXLen, <2 x i32>, float, iXLen)
+
+define <4 x i32> @test_sf_vc_v_fv_se_e32m2(<4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv4i32.iXLen.f32.iXLen(iXLen 1, <4 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv4i32.iXLen.f32.iXLen(iXLen, <4 x i32>, float, iXLen)
+
+define <8 x i32> @test_sf_vc_v_fv_se_e32m4(<8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv8i32.iXLen.f32.iXLen(iXLen 1, <8 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv8i32.iXLen.f32.iXLen(iXLen, <8 x i32>, float, iXLen)
+
+define <16 x i32> @test_sf_vc_v_fv_se_e32m8(<16 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv16i32.iXLen.f32.iXLen(iXLen 1, <16 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv16i32.iXLen.f32.iXLen(iXLen, <16 x i32>, float, iXLen)
+
+define <1 x i64> @test_sf_vc_v_fv_se_e64m1(<1 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv1i64.iXLen.f64.iXLen(iXLen 1, <1 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <1 x i64> %0
+}
+
+declare <1 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv1i64.iXLen.f64.iXLen(iXLen, <1 x i64>, double, iXLen)
+
+define <2 x i64> @test_sf_vc_v_fv_se_e64m2(<2 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv2i64.iXLen.f64.iXLen(iXLen 1, <2 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv2i64.iXLen.f64.iXLen(iXLen, <2 x i64>, double, iXLen)
+
+define <4 x i64> @test_sf_vc_v_fv_se_e64m4(<4 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv4i64.iXLen.f64.iXLen(iXLen 1, <4 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv4i64.iXLen.f64.iXLen(iXLen, <4 x i64>, double, iXLen)
+
+define <8 x i64> @test_sf_vc_v_fv_se_e64m8(<8 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv8i64.iXLen.f64.iXLen(iXLen 1, <8 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv8i64.iXLen.f64.iXLen(iXLen, <8 x i64>, double, iXLen)
+
+define <1 x i16> @test_sf_vc_v_fv_e16mf4(<1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.fv.nxv1i16.iXLen.f16.iXLen(iXLen 1, <1 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.fv.nxv1i16.iXLen.f16.iXLen(iXLen, <1 x i16>, half, iXLen)
+
+define <2 x i16> @test_sf_vc_v_fv_e16mf2(<2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.fv.nxv2i16.iXLen.f16.iXLen(iXLen 1, <2 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.fv.nxv2i16.iXLen.f16.iXLen(iXLen, <2 x i16>, half, iXLen)
+
+define <4 x i16> @test_sf_vc_v_fv_e16m1(<4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.fv.nxv4i16.iXLen.f16.iXLen(iXLen 1, <4 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.fv.nxv4i16.iXLen.f16.iXLen(iXLen, <4 x i16>, half, iXLen)
+
+define <8 x i16> @test_sf_vc_v_fv_e16m2(<8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.fv.nxv8i16.iXLen.f16.iXLen(iXLen 1, <8 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.fv.nxv8i16.iXLen.f16.iXLen(iXLen, <8 x i16>, half, iXLen)
+
+define <16 x i16> @test_sf_vc_v_fv_e16m4(<16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.fv.nxv16i16.iXLen.f16.iXLen(iXLen 1, <16 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.fv.nxv16i16.iXLen.f16.iXLen(iXLen, <16 x i16>, half, iXLen)
+
+define <32 x i16> @test_sf_vc_v_fv_e16m8(<32 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.fv.nxv32i16.iXLen.f16.iXLen(iXLen 1, <32 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.fv.nxv32i16.iXLen.f16.iXLen(iXLen, <32 x i16>, half, iXLen)
+
+define <1 x i32> @test_sf_vc_v_fv_e32mf2(<1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.fv.nxv1i32.iXLen.f32.iXLen(iXLen 1, <1 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.fv.nxv1i32.iXLen.f32.iXLen(iXLen, <1 x i32>, float, iXLen)
+
+define <2 x i32> @test_sf_vc_v_fv_e32m1(<2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.fv.nxv2i32.iXLen.f32.iXLen(iXLen 1, <2 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.fv.nxv2i32.iXLen.f32.iXLen(iXLen, <2 x i32>, float, iXLen)
+
+define <4 x i32> @test_sf_vc_v_fv_e32m2(<4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.fv.nxv4i32.iXLen.f32.iXLen(iXLen 1, <4 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.fv.nxv4i32.iXLen.f32.iXLen(iXLen, <4 x i32>, float, iXLen)
+
+define <8 x i32> @test_sf_vc_v_fv_e32m4(<8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.fv.nxv8i32.iXLen.f32.iXLen(iXLen 1, <8 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.fv.nxv8i32.iXLen.f32.iXLen(iXLen, <8 x i32>, float, iXLen)
+
+define <16 x i32> @test_sf_vc_v_fv_e32m8(<16 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.fv.nxv16i32.iXLen.f32.iXLen(iXLen 1, <16 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.fv.nxv16i32.iXLen.f32.iXLen(iXLen, <16 x i32>, float, iXLen)
+
+define <1 x i64> @test_sf_vc_v_fv_e64m1(<1 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.fv.nxv1i64.iXLen.f64.iXLen(iXLen 1, <1 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <1 x i64> %0
+}
+
+declare <1 x i64> @llvm.riscv.sf.vc.v.fv.nxv1i64.iXLen.f64.iXLen(iXLen, <1 x i64>, double, iXLen)
+
+define <2 x i64> @test_sf_vc_v_fv_e64m2(<2 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.fv.nxv2i64.iXLen.f64.iXLen(iXLen 1, <2 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.riscv.sf.vc.v.fv.nxv2i64.iXLen.f64.iXLen(iXLen, <2 x i64>, double, iXLen)
+
+define <4 x i64> @test_sf_vc_v_fv_e64m4(<4 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.fv.nxv4i64.iXLen.f64.iXLen(iXLen 1, <4 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.riscv.sf.vc.v.fv.nxv4i64.iXLen.f64.iXLen(iXLen, <4 x i64>, double, iXLen)
+
+define <8 x i64> @test_sf_vc_v_fv_e64m8(<8 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fv_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.fv.nxv8i64.iXLen.f64.iXLen(iXLen 1, <8 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.riscv.sf.vc.v.fv.nxv8i64.iXLen.f64.iXLen(iXLen, <8 x i64>, double, iXLen)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvv.ll
new file mode 100644
index 000000000000000..7be1f1ab65f808d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvv.ll
@@ -0,0 +1,3008 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+xsfvcp \
+; RUN: -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+xsfvcp \
+; RUN: -verify-machineinstrs | FileCheck %s
+
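+; Note: the LMUL suffixes in the test names mirror those of the scalable-vector
+; xsfvcp tests these cases are derived from; the vsetvli in the CHECK lines
+; instead reflects the container type chosen for each fixed-length vector, so
+; (assuming the default 128-bit minimum VLEN) e.g. <2 x i64> is lowered in an
+; m1 container even though the test is named e64m2.
+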
+define void @test_sf_vc_vvv_se_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen 3, <1 x i8> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, <1 x i8>, <1 x i8>, <1 x i8>, iXLen)
+
+define void @test_sf_vc_vvv_se_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen 3, <2 x i8> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, <2 x i8>, <2 x i8>, <2 x i8>, iXLen)
+
+define void @test_sf_vc_vvv_se_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen 3, <4 x i8> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, <4 x i8>, <4 x i8>, <4 x i8>, iXLen)
+
+define void @test_sf_vc_vvv_se_e8m1(<8 x i8> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen 3, <8 x i8> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, <8 x i8>, <8 x i8>, <8 x i8>, iXLen)
+
+define void @test_sf_vc_vvv_se_e8m2(<16 x i8> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen 3, <16 x i8> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, <16 x i8>, <16 x i8>, <16 x i8>, iXLen)
+
+define void @test_sf_vc_vvv_se_e8m4(<32 x i8> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen 3, <32 x i8> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, <32 x i8>, <32 x i8>, <32 x i8>, iXLen)
+
+define void @test_sf_vc_vvv_se_e8m8(<64 x i8> %vd, <64 x i8> %vs2, <64 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen 3, <64 x i8> %vd, <64 x i8> %vs2, <64 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen, <64 x i8>, <64 x i8>, <64 x i8>, iXLen)
+
+define void @test_sf_vc_vvv_se_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen 3, <1 x i16> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, <1 x i16>, <1 x i16>, <1 x i16>, iXLen)
+
+define void @test_sf_vc_vvv_se_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen 3, <2 x i16> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, <2 x i16>, <2 x i16>, <2 x i16>, iXLen)
+
+define void @test_sf_vc_vvv_se_e16m1(<4 x i16> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen 3, <4 x i16> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, <4 x i16>, <4 x i16>, <4 x i16>, iXLen)
+
+define void @test_sf_vc_vvv_se_e16m2(<8 x i16> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen 3, <8 x i16> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, <8 x i16>, <8 x i16>, <8 x i16>, iXLen)
+
+define void @test_sf_vc_vvv_se_e16m4(<16 x i16> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen 3, <16 x i16> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, <16 x i16>, <16 x i16>, <16 x i16>, iXLen)
+
+define void @test_sf_vc_vvv_se_e16m8(<32 x i16> %vd, <32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen 3, <32 x i16> %vd, <32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen, <32 x i16>, <32 x i16>, <32 x i16>, iXLen)
+
+define void @test_sf_vc_vvv_se_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen 3, <1 x i32> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, <1 x i32>, <1 x i32>, <1 x i32>, iXLen)
+
+define void @test_sf_vc_vvv_se_e32m1(<2 x i32> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen 3, <2 x i32> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, <2 x i32>, <2 x i32>, <2 x i32>, iXLen)
+
+define void @test_sf_vc_vvv_se_e32m2(<4 x i32> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen 3, <4 x i32> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, <4 x i32>, <4 x i32>, <4 x i32>, iXLen)
+
+define void @test_sf_vc_vvv_se_e32m4(<8 x i32> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen 3, <8 x i32> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, <8 x i32>, <8 x i32>, <8 x i32>, iXLen)
+
+define void @test_sf_vc_vvv_se_e32m8(<16 x i32> %vd, <16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen 3, <16 x i32> %vd, <16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen, <16 x i32>, <16 x i32>, <16 x i32>, iXLen)
+
+define void @test_sf_vc_vvv_se_e64m1(<1 x i64> %vd, <1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen 3, <1 x i64> %vd, <1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen, <1 x i64>, <1 x i64>, <1 x i64>, iXLen)
+
+define void @test_sf_vc_vvv_se_e64m2(<2 x i64> %vd, <2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen 3, <2 x i64> %vd, <2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen, <2 x i64>, <2 x i64>, <2 x i64>, iXLen)
+
+define void @test_sf_vc_vvv_se_e64m4(<4 x i64> %vd, <4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen 3, <4 x i64> %vd, <4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen, <4 x i64>, <4 x i64>, <4 x i64>, iXLen)
+
+define void @test_sf_vc_vvv_se_e64m8(<8 x i64> %vd, <8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvv_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen 3, <8 x i64> %vd, <8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen, <8 x i64>, <8 x i64>, <8 x i64>, iXLen)
+
+define <1 x i8> @test_sf_vc_v_vvv_se_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen 3, <1 x i8> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl)
+ ret <1 x i8> %0
+}
+
+declare <1 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, <1 x i8>, <1 x i8>, <1 x i8>, iXLen)
+
+define <2 x i8> @test_sf_vc_v_vvv_se_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen 3, <2 x i8> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl)
+ ret <2 x i8> %0
+}
+
+declare <2 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, <2 x i8>, <2 x i8>, <2 x i8>, iXLen)
+
+define <4 x i8> @test_sf_vc_v_vvv_se_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen 3, <4 x i8> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl)
+ ret <4 x i8> %0
+}
+
+declare <4 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, <4 x i8>, <4 x i8>, <4 x i8>, iXLen)
+
+define <8 x i8> @test_sf_vc_v_vvv_se_e8m1(<8 x i8> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen 3, <8 x i8> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl)
+ ret <8 x i8> %0
+}
+
+declare <8 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, <8 x i8>, <8 x i8>, <8 x i8>, iXLen)
+
+define <16 x i8> @test_sf_vc_v_vvv_se_e8m2(<16 x i8> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen 3, <16 x i8> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl)
+ ret <16 x i8> %0
+}
+
+declare <16 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, <16 x i8>, <16 x i8>, <16 x i8>, iXLen)
+
+define <32 x i8> @test_sf_vc_v_vvv_se_e8m4(<32 x i8> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen 3, <32 x i8> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl)
+ ret <32 x i8> %0
+}
+
+declare <32 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, <32 x i8>, <32 x i8>, <32 x i8>, iXLen)
+
+define <64 x i8> @test_sf_vc_v_vvv_se_e8m8(<64 x i8> %vd, <64 x i8> %vs2, <64 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <64 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen 3, <64 x i8> %vd, <64 x i8> %vs2, <64 x i8> %vs1, iXLen %vl)
+ ret <64 x i8> %0
+}
+
+declare <64 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, <64 x i8>, <64 x i8>, <64 x i8>, iXLen)
+
+define <1 x i16> @test_sf_vc_v_vvv_se_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen 3, <1 x i16> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, <1 x i16>, <1 x i16>, <1 x i16>, iXLen)
+
+define <2 x i16> @test_sf_vc_v_vvv_se_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen 3, <2 x i16> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, <2 x i16>, <2 x i16>, <2 x i16>, iXLen)
+
+define <4 x i16> @test_sf_vc_v_vvv_se_e16m1(<4 x i16> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen 3, <4 x i16> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, <4 x i16>, <4 x i16>, <4 x i16>, iXLen)
+
+define <8 x i16> @test_sf_vc_v_vvv_se_e16m2(<8 x i16> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen 3, <8 x i16> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, <8 x i16>, <8 x i16>, <8 x i16>, iXLen)
+
+define <16 x i16> @test_sf_vc_v_vvv_se_e16m4(<16 x i16> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen 3, <16 x i16> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, <16 x i16>, <16 x i16>, <16 x i16>, iXLen)
+
+define <32 x i16> @test_sf_vc_v_vvv_se_e16m8(<32 x i16> %vd, <32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen 3, <32 x i16> %vd, <32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, <32 x i16>, <32 x i16>, <32 x i16>, iXLen)
+
+define <1 x i32> @test_sf_vc_v_vvv_se_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen 3, <1 x i32> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, <1 x i32>, <1 x i32>, <1 x i32>, iXLen)
+
+define <2 x i32> @test_sf_vc_v_vvv_se_e32m1(<2 x i32> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen 3, <2 x i32> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, <2 x i32>, <2 x i32>, <2 x i32>, iXLen)
+
+define <4 x i32> @test_sf_vc_v_vvv_se_e32m2(<4 x i32> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen 3, <4 x i32> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, <4 x i32>, <4 x i32>, <4 x i32>, iXLen)
+
+define <8 x i32> @test_sf_vc_v_vvv_se_e32m4(<8 x i32> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen 3, <8 x i32> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, <8 x i32>, <8 x i32>, <8 x i32>, iXLen)
+
+define <16 x i32> @test_sf_vc_v_vvv_se_e32m8(<16 x i32> %vd, <16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen 3, <16 x i32> %vd, <16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, <16 x i32>, <16 x i32>, <16 x i32>, iXLen)
+
+define <1 x i64> @test_sf_vc_v_vvv_se_e64m1(<1 x i64> %vd, <1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen 3, <1 x i64> %vd, <1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl)
+ ret <1 x i64> %0
+}
+
+declare <1 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, <1 x i64>, <1 x i64>, <1 x i64>, iXLen)
+
+define <2 x i64> @test_sf_vc_v_vvv_se_e64m2(<2 x i64> %vd, <2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen 3, <2 x i64> %vd, <2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, <2 x i64>, <2 x i64>, <2 x i64>, iXLen)
+
+define <4 x i64> @test_sf_vc_v_vvv_se_e64m4(<4 x i64> %vd, <4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen 3, <4 x i64> %vd, <4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, <4 x i64>, <4 x i64>, <4 x i64>, iXLen)
+
+define <8 x i64> @test_sf_vc_v_vvv_se_e64m8(<8 x i64> %vd, <8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen 3, <8 x i64> %vd, <8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, <8 x i64>, <8 x i64>, <8 x i64>, iXLen)
+
+define <1 x i8> @test_sf_vc_v_vvv_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i8> @llvm.riscv.sf.vc.v.vvv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen 3, <1 x i8> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl)
+ ret <1 x i8> %0
+}
+
+declare <1 x i8> @llvm.riscv.sf.vc.v.vvv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, <1 x i8>, <1 x i8>, <1 x i8>, iXLen)
+
+define <2 x i8> @test_sf_vc_v_vvv_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i8> @llvm.riscv.sf.vc.v.vvv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen 3, <2 x i8> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl)
+ ret <2 x i8> %0
+}
+
+declare <2 x i8> @llvm.riscv.sf.vc.v.vvv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, <2 x i8>, <2 x i8>, <2 x i8>, iXLen)
+
+define <4 x i8> @test_sf_vc_v_vvv_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i8> @llvm.riscv.sf.vc.v.vvv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen 3, <4 x i8> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl)
+ ret <4 x i8> %0
+}
+
+declare <4 x i8> @llvm.riscv.sf.vc.v.vvv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, <4 x i8>, <4 x i8>, <4 x i8>, iXLen)
+
+define <8 x i8> @test_sf_vc_v_vvv_e8m1(<8 x i8> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i8> @llvm.riscv.sf.vc.v.vvv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen 3, <8 x i8> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl)
+ ret <8 x i8> %0
+}
+
+declare <8 x i8> @llvm.riscv.sf.vc.v.vvv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, <8 x i8>, <8 x i8>, <8 x i8>, iXLen)
+
+define <16 x i8> @test_sf_vc_v_vvv_e8m2(<16 x i8> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i8> @llvm.riscv.sf.vc.v.vvv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen 3, <16 x i8> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl)
+ ret <16 x i8> %0
+}
+
+declare <16 x i8> @llvm.riscv.sf.vc.v.vvv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, <16 x i8>, <16 x i8>, <16 x i8>, iXLen)
+
+define <32 x i8> @test_sf_vc_v_vvv_e8m4(<32 x i8> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i8> @llvm.riscv.sf.vc.v.vvv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen 3, <32 x i8> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl)
+ ret <32 x i8> %0
+}
+
+declare <32 x i8> @llvm.riscv.sf.vc.v.vvv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, <32 x i8>, <32 x i8>, <32 x i8>, iXLen)
+
+define <64 x i8> @test_sf_vc_v_vvv_e8m8(<64 x i8> %vd, <64 x i8> %vs2, <64 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <64 x i8> @llvm.riscv.sf.vc.v.vvv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen 3, <64 x i8> %vd, <64 x i8> %vs2, <64 x i8> %vs1, iXLen %vl)
+ ret <64 x i8> %0
+}
+
+declare <64 x i8> @llvm.riscv.sf.vc.v.vvv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, <64 x i8>, <64 x i8>, <64 x i8>, iXLen)
+
+define <1 x i16> @test_sf_vc_v_vvv_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.vvv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen 3, <1 x i16> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.vvv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, <1 x i16>, <1 x i16>, <1 x i16>, iXLen)
+
+define <2 x i16> @test_sf_vc_v_vvv_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.vvv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen 3, <2 x i16> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.vvv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, <2 x i16>, <2 x i16>, <2 x i16>, iXLen)
+
+define <4 x i16> @test_sf_vc_v_vvv_e16m1(<4 x i16> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.vvv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen 3, <4 x i16> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.vvv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, <4 x i16>, <4 x i16>, <4 x i16>, iXLen)
+
+define <8 x i16> @test_sf_vc_v_vvv_e16m2(<8 x i16> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.vvv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen 3, <8 x i16> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.vvv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, <8 x i16>, <8 x i16>, <8 x i16>, iXLen)
+
+define <16 x i16> @test_sf_vc_v_vvv_e16m4(<16 x i16> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.vvv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen 3, <16 x i16> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.vvv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, <16 x i16>, <16 x i16>, <16 x i16>, iXLen)
+
+define <32 x i16> @test_sf_vc_v_vvv_e16m8(<32 x i16> %vd, <32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.vvv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen 3, <32 x i16> %vd, <32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.vvv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, <32 x i16>, <32 x i16>, <32 x i16>, iXLen)
+
+define <1 x i32> @test_sf_vc_v_vvv_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.vvv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen 3, <1 x i32> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.vvv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, <1 x i32>, <1 x i32>, <1 x i32>, iXLen)
+
+define <2 x i32> @test_sf_vc_v_vvv_e32m1(<2 x i32> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.vvv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen 3, <2 x i32> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.vvv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, <2 x i32>, <2 x i32>, <2 x i32>, iXLen)
+
+define <4 x i32> @test_sf_vc_v_vvv_e32m2(<4 x i32> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.vvv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen 3, <4 x i32> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.vvv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, <4 x i32>, <4 x i32>, <4 x i32>, iXLen)
+
+define <8 x i32> @test_sf_vc_v_vvv_e32m4(<8 x i32> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.vvv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen 3, <8 x i32> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.vvv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, <8 x i32>, <8 x i32>, <8 x i32>, iXLen)
+
+define <16 x i32> @test_sf_vc_v_vvv_e32m8(<16 x i32> %vd, <16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.vvv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen 3, <16 x i32> %vd, <16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.vvv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, <16 x i32>, <16 x i32>, <16 x i32>, iXLen)
+
+define <1 x i64> @test_sf_vc_v_vvv_e64m1(<1 x i64> %vd, <1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.vvv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen 3, <1 x i64> %vd, <1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl)
+ ret <1 x i64> %0
+}
+
+declare <1 x i64> @llvm.riscv.sf.vc.v.vvv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, <1 x i64>, <1 x i64>, <1 x i64>, iXLen)
+
+define <2 x i64> @test_sf_vc_v_vvv_e64m2(<2 x i64> %vd, <2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.vvv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen 3, <2 x i64> %vd, <2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.riscv.sf.vc.v.vvv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, <2 x i64>, <2 x i64>, <2 x i64>, iXLen)
+
+define <4 x i64> @test_sf_vc_v_vvv_e64m4(<4 x i64> %vd, <4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.vvv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen 3, <4 x i64> %vd, <4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.riscv.sf.vc.v.vvv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, <4 x i64>, <4 x i64>, <4 x i64>, iXLen)
+
+define <8 x i64> @test_sf_vc_v_vvv_e64m8(<8 x i64> %vd, <8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvv_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen 3, <8 x i64> %vd, <8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, <8 x i64>, <8 x i64>, <8 x i64>, iXLen)
+
+define void @test_sf_vc_xvv_se_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i8.i8.iXLen(iXLen 3, <1 x i8> %vd, <1 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i8.i8.iXLen(iXLen, <1 x i8>, <1 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvv_se_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i8.i8.iXLen(iXLen 3, <2 x i8> %vd, <2 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i8.i8.iXLen(iXLen, <2 x i8>, <2 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvv_se_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i8.i8.iXLen(iXLen 3, <4 x i8> %vd, <4 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i8.i8.iXLen(iXLen, <4 x i8>, <4 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvv_se_e8m1(<8 x i8> %vd, <8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i8.i8.iXLen(iXLen 3, <8 x i8> %vd, <8 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i8.i8.iXLen(iXLen, <8 x i8>, <8 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvv_se_e8m2(<16 x i8> %vd, <16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i8.i8.iXLen(iXLen 3, <16 x i8> %vd, <16 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i8.i8.iXLen(iXLen, <16 x i8>, <16 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvv_se_e8m4(<32 x i8> %vd, <32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i8.i8.iXLen(iXLen 3, <32 x i8> %vd, <32 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i8.i8.iXLen(iXLen, <32 x i8>, <32 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvv_se_e8m8(<64 x i8> %vd, <64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv64i8.i8.iXLen(iXLen 3, <64 x i8> %vd, <64 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv64i8.i8.iXLen(iXLen, <64 x i8>, <64 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvv_se_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i16.i16.iXLen(iXLen 3, <1 x i16> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i16.i16.iXLen(iXLen, <1 x i16>, <1 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvv_se_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i16.i16.iXLen(iXLen 3, <2 x i16> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i16.i16.iXLen(iXLen, <2 x i16>, <2 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvv_se_e16m1(<4 x i16> %vd, <4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i16.i16.iXLen(iXLen 3, <4 x i16> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i16.i16.iXLen(iXLen, <4 x i16>, <4 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvv_se_e16m2(<8 x i16> %vd, <8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i16.i16.iXLen(iXLen 3, <8 x i16> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i16.i16.iXLen(iXLen, <8 x i16>, <8 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvv_se_e16m4(<16 x i16> %vd, <16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i16.i16.iXLen(iXLen 3, <16 x i16> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i16.i16.iXLen(iXLen, <16 x i16>, <16 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvv_se_e16m8(<32 x i16> %vd, <32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i16.i16.iXLen(iXLen 3, <32 x i16> %vd, <32 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i16.i16.iXLen(iXLen, <32 x i16>, <32 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvv_se_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i32.i32.iXLen(iXLen 3, <1 x i32> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i32.i32.iXLen(iXLen, <1 x i32>, <1 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xvv_se_e32m1(<2 x i32> %vd, <2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i32.i32.iXLen(iXLen 3, <2 x i32> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i32.i32.iXLen(iXLen, <2 x i32>, <2 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xvv_se_e32m2(<4 x i32> %vd, <4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i32.i32.iXLen(iXLen 3, <4 x i32> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i32.i32.iXLen(iXLen, <4 x i32>, <4 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xvv_se_e32m4(<8 x i32> %vd, <8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i32.i32.iXLen(iXLen 3, <8 x i32> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i32.i32.iXLen(iXLen, <8 x i32>, <8 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xvv_se_e32m8(<16 x i32> %vd, <16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.xvv 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i32.i32.iXLen(iXLen 3, <16 x i32> %vd, <16 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i32.i32.iXLen(iXLen, <16 x i32>, <16 x i32>, i32, iXLen)
+
+define <1 x i8> @test_sf_vc_v_xvv_se_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.iXLen.i8.iXLen(iXLen 3, <1 x i8> %vd, <1 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <1 x i8> %0
+}
+
+declare <1 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.iXLen.i8.iXLen(iXLen, <1 x i8>, <1 x i8>, i8, iXLen)
+
+define <2 x i8> @test_sf_vc_v_xvv_se_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.iXLen.i8.iXLen(iXLen 3, <2 x i8> %vd, <2 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <2 x i8> %0
+}
+
+declare <2 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.iXLen.i8.iXLen(iXLen, <2 x i8>, <2 x i8>, i8, iXLen)
+
+define <4 x i8> @test_sf_vc_v_xvv_se_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.iXLen.i8.iXLen(iXLen 3, <4 x i8> %vd, <4 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <4 x i8> %0
+}
+
+declare <4 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.iXLen.i8.iXLen(iXLen, <4 x i8>, <4 x i8>, i8, iXLen)
+
+define <8 x i8> @test_sf_vc_v_xvv_se_e8m1(<8 x i8> %vd, <8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.iXLen.i8.iXLen(iXLen 3, <8 x i8> %vd, <8 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <8 x i8> %0
+}
+
+declare <8 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.iXLen.i8.iXLen(iXLen, <8 x i8>, <8 x i8>, i8, iXLen)
+
+define <16 x i8> @test_sf_vc_v_xvv_se_e8m2(<16 x i8> %vd, <16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.iXLen.i8.iXLen(iXLen 3, <16 x i8> %vd, <16 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <16 x i8> %0
+}
+
+declare <16 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.iXLen.i8.iXLen(iXLen, <16 x i8>, <16 x i8>, i8, iXLen)
+
+define <32 x i8> @test_sf_vc_v_xvv_se_e8m4(<32 x i8> %vd, <32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.iXLen.i8.iXLen(iXLen 3, <32 x i8> %vd, <32 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <32 x i8> %0
+}
+
+declare <32 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.iXLen.i8.iXLen(iXLen, <32 x i8>, <32 x i8>, i8, iXLen)
+
+define <64 x i8> @test_sf_vc_v_xvv_se_e8m8(<64 x i8> %vd, <64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <64 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.iXLen.i8.iXLen(iXLen 3, <64 x i8> %vd, <64 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <64 x i8> %0
+}
+
+declare <64 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.iXLen.i8.iXLen(iXLen, <64 x i8>, <64 x i8>, i8, iXLen)
+
+define <1 x i16> @test_sf_vc_v_xvv_se_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.iXLen.i16.iXLen(iXLen 3, <1 x i16> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.iXLen.i16.iXLen(iXLen, <1 x i16>, <1 x i16>, i16, iXLen)
+
+define <2 x i16> @test_sf_vc_v_xvv_se_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.iXLen.i16.iXLen(iXLen 3, <2 x i16> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.iXLen.i16.iXLen(iXLen, <2 x i16>, <2 x i16>, i16, iXLen)
+
+define <4 x i16> @test_sf_vc_v_xvv_se_e16m1(<4 x i16> %vd, <4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.iXLen.i16.iXLen(iXLen 3, <4 x i16> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.iXLen.i16.iXLen(iXLen, <4 x i16>, <4 x i16>, i16, iXLen)
+
+define <8 x i16> @test_sf_vc_v_xvv_se_e16m2(<8 x i16> %vd, <8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.iXLen.i16.iXLen(iXLen 3, <8 x i16> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.iXLen.i16.iXLen(iXLen, <8 x i16>, <8 x i16>, i16, iXLen)
+
+define <16 x i16> @test_sf_vc_v_xvv_se_e16m4(<16 x i16> %vd, <16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.iXLen.i16.iXLen(iXLen 3, <16 x i16> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.iXLen.i16.iXLen(iXLen, <16 x i16>, <16 x i16>, i16, iXLen)
+
+define <32 x i16> @test_sf_vc_v_xvv_se_e16m8(<32 x i16> %vd, <32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.iXLen.i16.iXLen(iXLen 3, <32 x i16> %vd, <32 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.iXLen.i16.iXLen(iXLen, <32 x i16>, <32 x i16>, i16, iXLen)
+
+define <1 x i32> @test_sf_vc_v_xvv_se_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.iXLen.i32.iXLen(iXLen 3, <1 x i32> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.iXLen.i32.iXLen(iXLen, <1 x i32>, <1 x i32>, i32, iXLen)
+
+define <2 x i32> @test_sf_vc_v_xvv_se_e32m1(<2 x i32> %vd, <2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.iXLen.i32.iXLen(iXLen 3, <2 x i32> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.iXLen.i32.iXLen(iXLen, <2 x i32>, <2 x i32>, i32, iXLen)
+
+define <4 x i32> @test_sf_vc_v_xvv_se_e32m2(<4 x i32> %vd, <4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.iXLen.i32.iXLen(iXLen 3, <4 x i32> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.iXLen.i32.iXLen(iXLen, <4 x i32>, <4 x i32>, i32, iXLen)
+
+define <8 x i32> @test_sf_vc_v_xvv_se_e32m4(<8 x i32> %vd, <8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.iXLen.i32.iXLen(iXLen 3, <8 x i32> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.iXLen.i32.iXLen(iXLen, <8 x i32>, <8 x i32>, i32, iXLen)
+
+define <16 x i32> @test_sf_vc_v_xvv_se_e32m8(<16 x i32> %vd, <16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.iXLen.i32.iXLen(iXLen 3, <16 x i32> %vd, <16 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.iXLen.i32.iXLen(iXLen, <16 x i32>, <16 x i32>, i32, iXLen)
+
+define <1 x i8> @test_sf_vc_v_xvv_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i8> @llvm.riscv.sf.vc.v.xvv.nxv1i8.iXLen.i8.iXLen(iXLen 3, <1 x i8> %vd, <1 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <1 x i8> %0
+}
+
+declare <1 x i8> @llvm.riscv.sf.vc.v.xvv.nxv1i8.iXLen.i8.iXLen(iXLen, <1 x i8>, <1 x i8>, i8, iXLen)
+
+define <2 x i8> @test_sf_vc_v_xvv_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i8> @llvm.riscv.sf.vc.v.xvv.nxv2i8.iXLen.i8.iXLen(iXLen 3, <2 x i8> %vd, <2 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <2 x i8> %0
+}
+
+declare <2 x i8> @llvm.riscv.sf.vc.v.xvv.nxv2i8.iXLen.i8.iXLen(iXLen, <2 x i8>, <2 x i8>, i8, iXLen)
+
+define <4 x i8> @test_sf_vc_v_xvv_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i8> @llvm.riscv.sf.vc.v.xvv.nxv4i8.iXLen.i8.iXLen(iXLen 3, <4 x i8> %vd, <4 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <4 x i8> %0
+}
+
+declare <4 x i8> @llvm.riscv.sf.vc.v.xvv.nxv4i8.iXLen.i8.iXLen(iXLen, <4 x i8>, <4 x i8>, i8, iXLen)
+
+define <8 x i8> @test_sf_vc_v_xvv_e8m1(<8 x i8> %vd, <8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i8> @llvm.riscv.sf.vc.v.xvv.nxv8i8.iXLen.i8.iXLen(iXLen 3, <8 x i8> %vd, <8 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <8 x i8> %0
+}
+
+declare <8 x i8> @llvm.riscv.sf.vc.v.xvv.nxv8i8.iXLen.i8.iXLen(iXLen, <8 x i8>, <8 x i8>, i8, iXLen)
+
+define <16 x i8> @test_sf_vc_v_xvv_e8m2(<16 x i8> %vd, <16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i8> @llvm.riscv.sf.vc.v.xvv.nxv16i8.iXLen.i8.iXLen(iXLen 3, <16 x i8> %vd, <16 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <16 x i8> %0
+}
+
+declare <16 x i8> @llvm.riscv.sf.vc.v.xvv.nxv16i8.iXLen.i8.iXLen(iXLen, <16 x i8>, <16 x i8>, i8, iXLen)
+
+define <32 x i8> @test_sf_vc_v_xvv_e8m4(<32 x i8> %vd, <32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i8> @llvm.riscv.sf.vc.v.xvv.nxv32i8.iXLen.i8.iXLen(iXLen 3, <32 x i8> %vd, <32 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <32 x i8> %0
+}
+
+declare <32 x i8> @llvm.riscv.sf.vc.v.xvv.nxv32i8.iXLen.i8.iXLen(iXLen, <32 x i8>, <32 x i8>, i8, iXLen)
+
+define <64 x i8> @test_sf_vc_v_xvv_e8m8(<64 x i8> %vd, <64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <64 x i8> @llvm.riscv.sf.vc.v.xvv.nxv64i8.iXLen.i8.iXLen(iXLen 3, <64 x i8> %vd, <64 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <64 x i8> %0
+}
+
+declare <64 x i8> @llvm.riscv.sf.vc.v.xvv.nxv64i8.iXLen.i8.iXLen(iXLen, <64 x i8>, <64 x i8>, i8, iXLen)
+
+define <1 x i16> @test_sf_vc_v_xvv_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.xvv.nxv1i16.iXLen.i16.iXLen(iXLen 3, <1 x i16> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.xvv.nxv1i16.iXLen.i16.iXLen(iXLen, <1 x i16>, <1 x i16>, i16, iXLen)
+
+define <2 x i16> @test_sf_vc_v_xvv_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.xvv.nxv2i16.iXLen.i16.iXLen(iXLen 3, <2 x i16> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.xvv.nxv2i16.iXLen.i16.iXLen(iXLen, <2 x i16>, <2 x i16>, i16, iXLen)
+
+define <4 x i16> @test_sf_vc_v_xvv_e16m1(<4 x i16> %vd, <4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.xvv.nxv4i16.iXLen.i16.iXLen(iXLen 3, <4 x i16> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.xvv.nxv4i16.iXLen.i16.iXLen(iXLen, <4 x i16>, <4 x i16>, i16, iXLen)
+
+define <8 x i16> @test_sf_vc_v_xvv_e16m2(<8 x i16> %vd, <8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.xvv.nxv8i16.iXLen.i16.iXLen(iXLen 3, <8 x i16> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.xvv.nxv8i16.iXLen.i16.iXLen(iXLen, <8 x i16>, <8 x i16>, i16, iXLen)
+
+define <16 x i16> @test_sf_vc_v_xvv_e16m4(<16 x i16> %vd, <16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.xvv.nxv16i16.iXLen.i16.iXLen(iXLen 3, <16 x i16> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.xvv.nxv16i16.iXLen.i16.iXLen(iXLen, <16 x i16>, <16 x i16>, i16, iXLen)
+
+define <32 x i16> @test_sf_vc_v_xvv_e16m8(<32 x i16> %vd, <32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.xvv.nxv32i16.iXLen.i16.iXLen(iXLen 3, <32 x i16> %vd, <32 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.xvv.nxv32i16.iXLen.i16.iXLen(iXLen, <32 x i16>, <32 x i16>, i16, iXLen)
+
+define <1 x i32> @test_sf_vc_v_xvv_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.xvv.nxv1i32.iXLen.i32.iXLen(iXLen 3, <1 x i32> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.xvv.nxv1i32.iXLen.i32.iXLen(iXLen, <1 x i32>, <1 x i32>, i32, iXLen)
+
+define <2 x i32> @test_sf_vc_v_xvv_e32m1(<2 x i32> %vd, <2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.xvv.nxv2i32.iXLen.i32.iXLen(iXLen 3, <2 x i32> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.xvv.nxv2i32.iXLen.i32.iXLen(iXLen, <2 x i32>, <2 x i32>, i32, iXLen)
+
+define <4 x i32> @test_sf_vc_v_xvv_e32m2(<4 x i32> %vd, <4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.xvv.nxv4i32.iXLen.i32.iXLen(iXLen 3, <4 x i32> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.xvv.nxv4i32.iXLen.i32.iXLen(iXLen, <4 x i32>, <4 x i32>, i32, iXLen)
+
+define <8 x i32> @test_sf_vc_v_xvv_e32m4(<8 x i32> %vd, <8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.xvv.nxv8i32.iXLen.i32.iXLen(iXLen 3, <8 x i32> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.xvv.nxv8i32.iXLen.i32.iXLen(iXLen, <8 x i32>, <8 x i32>, i32, iXLen)
+
+define <16 x i32> @test_sf_vc_v_xvv_e32m8(<16 x i32> %vd, <16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvv_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.xvv.nxv16i32.iXLen.i32.iXLen(iXLen 3, <16 x i32> %vd, <16 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.xvv.nxv16i32.iXLen.i32.iXLen(iXLen, <16 x i32>, <16 x i32>, i32, iXLen)
+
+define void @test_sf_vc_ivv_se_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen 3, <1 x i8> %vd, <1 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen, <1 x i8>, <1 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen 3, <2 x i8> %vd, <2 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen, <2 x i8>, <2 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen 3, <4 x i8> %vd, <4 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen, <4 x i8>, <4 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e8m1(<8 x i8> %vd, <8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen 3, <8 x i8> %vd, <8 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen, <8 x i8>, <8 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e8m2(<16 x i8> %vd, <16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen 3, <16 x i8> %vd, <16 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen, <16 x i8>, <16 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e8m4(<32 x i8> %vd, <32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen 3, <32 x i8> %vd, <32 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen, <32 x i8>, <32 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e8m8(<64 x i8> %vd, <64 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen 3, <64 x i8> %vd, <64 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen, <64 x i8>, <64 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen 3, <1 x i16> %vd, <1 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen, <1 x i16>, <1 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen 3, <2 x i16> %vd, <2 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen, <2 x i16>, <2 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e16m1(<4 x i16> %vd, <4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen 3, <4 x i16> %vd, <4 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen, <4 x i16>, <4 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e16m2(<8 x i16> %vd, <8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen 3, <8 x i16> %vd, <8 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen, <8 x i16>, <8 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e16m4(<16 x i16> %vd, <16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen 3, <16 x i16> %vd, <16 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen, <16 x i16>, <16 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e16m8(<32 x i16> %vd, <32 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen 3, <32 x i16> %vd, <32 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen, <32 x i16>, <32 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen 3, <1 x i32> %vd, <1 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen, <1 x i32>, <1 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e32m1(<2 x i32> %vd, <2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen 3, <2 x i32> %vd, <2 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen, <2 x i32>, <2 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e32m2(<4 x i32> %vd, <4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen 3, <4 x i32> %vd, <4 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen, <4 x i32>, <4 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e32m4(<8 x i32> %vd, <8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen 3, <8 x i32> %vd, <8 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen, <8 x i32>, <8 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e32m8(<16 x i32> %vd, <16 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen 3, <16 x i32> %vd, <16 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen, <16 x i32>, <16 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e64m1(<1 x i64> %vd, <1 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen 3, <1 x i64> %vd, <1 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen, <1 x i64>, <1 x i64>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e64m2(<2 x i64> %vd, <2 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen 3, <2 x i64> %vd, <2 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen, <2 x i64>, <2 x i64>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e64m4(<4 x i64> %vd, <4 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen 3, <4 x i64> %vd, <4 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen, <4 x i64>, <4 x i64>, iXLen, iXLen)
+
+define void @test_sf_vc_ivv_se_e64m8(<8 x i64> %vd, <8 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivv_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen 3, <8 x i64> %vd, <8 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen, <8 x i64>, <8 x i64>, iXLen, iXLen)
+
+define <1 x i8> @test_sf_vc_v_ivv_se_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, <1 x i8> %vd, <1 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <1 x i8> %0
+}
+
+declare <1 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, <1 x i8>, <1 x i8>, iXLen, iXLen)
+
+define <2 x i8> @test_sf_vc_v_ivv_se_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, <2 x i8> %vd, <2 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <2 x i8> %0
+}
+
+declare <2 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, <2 x i8>, <2 x i8>, iXLen, iXLen)
+
+define <4 x i8> @test_sf_vc_v_ivv_se_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, <4 x i8> %vd, <4 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <4 x i8> %0
+}
+
+declare <4 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, <4 x i8>, <4 x i8>, iXLen, iXLen)
+
+define <8 x i8> @test_sf_vc_v_ivv_se_e8m1(<8 x i8> %vd, <8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, <8 x i8> %vd, <8 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <8 x i8> %0
+}
+
+declare <8 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, <8 x i8>, <8 x i8>, iXLen, iXLen)
+
+define <16 x i8> @test_sf_vc_v_ivv_se_e8m2(<16 x i8> %vd, <16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, <16 x i8> %vd, <16 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <16 x i8> %0
+}
+
+declare <16 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, <16 x i8>, <16 x i8>, iXLen, iXLen)
+
+define <32 x i8> @test_sf_vc_v_ivv_se_e8m4(<32 x i8> %vd, <32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, <32 x i8> %vd, <32 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <32 x i8> %0
+}
+
+declare <32 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, <32 x i8>, <32 x i8>, iXLen, iXLen)
+
+define <64 x i8> @test_sf_vc_v_ivv_se_e8m8(<64 x i8> %vd, <64 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <64 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, <64 x i8> %vd, <64 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <64 x i8> %0
+}
+
+declare <64 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, <64 x i8>, <64 x i8>, iXLen, iXLen)
+
+define <1 x i16> @test_sf_vc_v_ivv_se_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, <1 x i16> %vd, <1 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, <1 x i16>, <1 x i16>, iXLen, iXLen)
+
+define <2 x i16> @test_sf_vc_v_ivv_se_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, <2 x i16> %vd, <2 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, <2 x i16>, <2 x i16>, iXLen, iXLen)
+
+define <4 x i16> @test_sf_vc_v_ivv_se_e16m1(<4 x i16> %vd, <4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, <4 x i16> %vd, <4 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, <4 x i16>, <4 x i16>, iXLen, iXLen)
+
+define <8 x i16> @test_sf_vc_v_ivv_se_e16m2(<8 x i16> %vd, <8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, <8 x i16> %vd, <8 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, <8 x i16>, <8 x i16>, iXLen, iXLen)
+
+define <16 x i16> @test_sf_vc_v_ivv_se_e16m4(<16 x i16> %vd, <16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, <16 x i16> %vd, <16 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, <16 x i16>, <16 x i16>, iXLen, iXLen)
+
+define <32 x i16> @test_sf_vc_v_ivv_se_e16m8(<32 x i16> %vd, <32 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, <32 x i16> %vd, <32 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, <32 x i16>, <32 x i16>, iXLen, iXLen)
+
+define <1 x i32> @test_sf_vc_v_ivv_se_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, <1 x i32> %vd, <1 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, <1 x i32>, <1 x i32>, iXLen, iXLen)
+
+define <2 x i32> @test_sf_vc_v_ivv_se_e32m1(<2 x i32> %vd, <2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, <2 x i32> %vd, <2 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, <2 x i32>, <2 x i32>, iXLen, iXLen)
+
+define <4 x i32> @test_sf_vc_v_ivv_se_e32m2(<4 x i32> %vd, <4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, <4 x i32> %vd, <4 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, <4 x i32>, <4 x i32>, iXLen, iXLen)
+
+define <8 x i32> @test_sf_vc_v_ivv_se_e32m4(<8 x i32> %vd, <8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, <8 x i32> %vd, <8 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, <8 x i32>, <8 x i32>, iXLen, iXLen)
+
+define <16 x i32> @test_sf_vc_v_ivv_se_e32m8(<16 x i32> %vd, <16 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, <16 x i32> %vd, <16 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, <16 x i32>, <16 x i32>, iXLen, iXLen)
+
+define <1 x i64> @test_sf_vc_v_ivv_se_e64m1(<1 x i64> %vd, <1 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, <1 x i64> %vd, <1 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <1 x i64> %0
+}
+
+declare <1 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, <1 x i64>, <1 x i64>, iXLen, iXLen)
+
+define <2 x i64> @test_sf_vc_v_ivv_se_e64m2(<2 x i64> %vd, <2 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, <2 x i64> %vd, <2 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, <2 x i64>, <2 x i64>, iXLen, iXLen)
+
+define <4 x i64> @test_sf_vc_v_ivv_se_e64m4(<4 x i64> %vd, <4 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, <4 x i64> %vd, <4 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, <4 x i64>, <4 x i64>, iXLen, iXLen)
+
+define <8 x i64> @test_sf_vc_v_ivv_se_e64m8(<8 x i64> %vd, <8 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, <8 x i64> %vd, <8 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, <8 x i64>, <8 x i64>, iXLen, iXLen)
+
+define <1 x i8> @test_sf_vc_v_ivv_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i8> @llvm.riscv.sf.vc.v.ivv.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, <1 x i8> %vd, <1 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <1 x i8> %0
+}
+
+declare <1 x i8> @llvm.riscv.sf.vc.v.ivv.nxv1i8.iXLen.iXLen.iXLen(iXLen, <1 x i8>, <1 x i8>, iXLen, iXLen)
+
+define <2 x i8> @test_sf_vc_v_ivv_e8mf4(<2 x i8> %vd, <2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i8> @llvm.riscv.sf.vc.v.ivv.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, <2 x i8> %vd, <2 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <2 x i8> %0
+}
+
+declare <2 x i8> @llvm.riscv.sf.vc.v.ivv.nxv2i8.iXLen.iXLen.iXLen(iXLen, <2 x i8>, <2 x i8>, iXLen, iXLen)
+
+define <4 x i8> @test_sf_vc_v_ivv_e8mf2(<4 x i8> %vd, <4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i8> @llvm.riscv.sf.vc.v.ivv.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, <4 x i8> %vd, <4 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <4 x i8> %0
+}
+
+declare <4 x i8> @llvm.riscv.sf.vc.v.ivv.nxv4i8.iXLen.iXLen.iXLen(iXLen, <4 x i8>, <4 x i8>, iXLen, iXLen)
+
+define <8 x i8> @test_sf_vc_v_ivv_e8m1(<8 x i8> %vd, <8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i8> @llvm.riscv.sf.vc.v.ivv.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, <8 x i8> %vd, <8 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <8 x i8> %0
+}
+
+declare <8 x i8> @llvm.riscv.sf.vc.v.ivv.nxv8i8.iXLen.iXLen.iXLen(iXLen, <8 x i8>, <8 x i8>, iXLen, iXLen)
+
+define <16 x i8> @test_sf_vc_v_ivv_e8m2(<16 x i8> %vd, <16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i8> @llvm.riscv.sf.vc.v.ivv.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, <16 x i8> %vd, <16 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <16 x i8> %0
+}
+
+declare <16 x i8> @llvm.riscv.sf.vc.v.ivv.nxv16i8.iXLen.iXLen.iXLen(iXLen, <16 x i8>, <16 x i8>, iXLen, iXLen)
+
+define <32 x i8> @test_sf_vc_v_ivv_e8m4(<32 x i8> %vd, <32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i8> @llvm.riscv.sf.vc.v.ivv.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, <32 x i8> %vd, <32 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <32 x i8> %0
+}
+
+declare <32 x i8> @llvm.riscv.sf.vc.v.ivv.nxv32i8.iXLen.iXLen.iXLen(iXLen, <32 x i8>, <32 x i8>, iXLen, iXLen)
+
+define <64 x i8> @test_sf_vc_v_ivv_e8m8(<64 x i8> %vd, <64 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <64 x i8> @llvm.riscv.sf.vc.v.ivv.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, <64 x i8> %vd, <64 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <64 x i8> %0
+}
+
+declare <64 x i8> @llvm.riscv.sf.vc.v.ivv.nxv64i8.iXLen.iXLen.iXLen(iXLen, <64 x i8>, <64 x i8>, iXLen, iXLen)
+
+define <1 x i16> @test_sf_vc_v_ivv_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.ivv.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, <1 x i16> %vd, <1 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.ivv.nxv1i16.iXLen.iXLen.iXLen(iXLen, <1 x i16>, <1 x i16>, iXLen, iXLen)
+
+define <2 x i16> @test_sf_vc_v_ivv_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.ivv.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, <2 x i16> %vd, <2 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.ivv.nxv2i16.iXLen.iXLen.iXLen(iXLen, <2 x i16>, <2 x i16>, iXLen, iXLen)
+
+define <4 x i16> @test_sf_vc_v_ivv_e16m1(<4 x i16> %vd, <4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.ivv.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, <4 x i16> %vd, <4 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.ivv.nxv4i16.iXLen.iXLen.iXLen(iXLen, <4 x i16>, <4 x i16>, iXLen, iXLen)
+
+define <8 x i16> @test_sf_vc_v_ivv_e16m2(<8 x i16> %vd, <8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.ivv.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, <8 x i16> %vd, <8 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.ivv.nxv8i16.iXLen.iXLen.iXLen(iXLen, <8 x i16>, <8 x i16>, iXLen, iXLen)
+
+define <16 x i16> @test_sf_vc_v_ivv_e16m4(<16 x i16> %vd, <16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.ivv.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, <16 x i16> %vd, <16 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.ivv.nxv16i16.iXLen.iXLen.iXLen(iXLen, <16 x i16>, <16 x i16>, iXLen, iXLen)
+
+define <32 x i16> @test_sf_vc_v_ivv_e16m8(<32 x i16> %vd, <32 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.ivv.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, <32 x i16> %vd, <32 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.ivv.nxv32i16.iXLen.iXLen.iXLen(iXLen, <32 x i16>, <32 x i16>, iXLen, iXLen)
+
+define <1 x i32> @test_sf_vc_v_ivv_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.ivv.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, <1 x i32> %vd, <1 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.ivv.nxv1i32.iXLen.iXLen.iXLen(iXLen, <1 x i32>, <1 x i32>, iXLen, iXLen)
+
+define <2 x i32> @test_sf_vc_v_ivv_e32m1(<2 x i32> %vd, <2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.ivv.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, <2 x i32> %vd, <2 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.ivv.nxv2i32.iXLen.iXLen.iXLen(iXLen, <2 x i32>, <2 x i32>, iXLen, iXLen)
+
+define <4 x i32> @test_sf_vc_v_ivv_e32m2(<4 x i32> %vd, <4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.ivv.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, <4 x i32> %vd, <4 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.ivv.nxv4i32.iXLen.iXLen.iXLen(iXLen, <4 x i32>, <4 x i32>, iXLen, iXLen)
+
+define <8 x i32> @test_sf_vc_v_ivv_e32m4(<8 x i32> %vd, <8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.ivv.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, <8 x i32> %vd, <8 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.ivv.nxv8i32.iXLen.iXLen.iXLen(iXLen, <8 x i32>, <8 x i32>, iXLen, iXLen)
+
+define <16 x i32> @test_sf_vc_v_ivv_e32m8(<16 x i32> %vd, <16 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.ivv.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, <16 x i32> %vd, <16 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.ivv.nxv16i32.iXLen.iXLen.iXLen(iXLen, <16 x i32>, <16 x i32>, iXLen, iXLen)
+
+define <1 x i64> @test_sf_vc_v_ivv_e64m1(<1 x i64> %vd, <1 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.ivv.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, <1 x i64> %vd, <1 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <1 x i64> %0
+}
+
+declare <1 x i64> @llvm.riscv.sf.vc.v.ivv.nxv1i64.iXLen.iXLen.iXLen(iXLen, <1 x i64>, <1 x i64>, iXLen, iXLen)
+
+define <2 x i64> @test_sf_vc_v_ivv_e64m2(<2 x i64> %vd, <2 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.ivv.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, <2 x i64> %vd, <2 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.riscv.sf.vc.v.ivv.nxv2i64.iXLen.iXLen.iXLen(iXLen, <2 x i64>, <2 x i64>, iXLen, iXLen)
+
+define <4 x i64> @test_sf_vc_v_ivv_e64m4(<4 x i64> %vd, <4 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.ivv.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, <4 x i64> %vd, <4 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.riscv.sf.vc.v.ivv.nxv4i64.iXLen.iXLen.iXLen(iXLen, <4 x i64>, <4 x i64>, iXLen, iXLen)
+
+define <8 x i64> @test_sf_vc_v_ivv_e64m8(<8 x i64> %vd, <8 x i64> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivv_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.ivv.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, <8 x i64> %vd, <8 x i64> %vs2, iXLen 10, iXLen %vl)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.riscv.sf.vc.v.ivv.nxv8i64.iXLen.iXLen.iXLen(iXLen, <8 x i64>, <8 x i64>, iXLen, iXLen)
+
+define void @test_sf_vc_fvv_se_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i16.f16.iXLen(iXLen 1, <1 x i16> %vd, <1 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i16.f16.iXLen(iXLen, <1 x i16>, <1 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvv_se_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i16.f16.iXLen(iXLen 1, <2 x i16> %vd, <2 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i16.f16.iXLen(iXLen, <2 x i16>, <2 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvv_se_e16m1(<4 x i16> %vd, <4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i16.f16.iXLen(iXLen 1, <4 x i16> %vd, <4 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i16.f16.iXLen(iXLen, <4 x i16>, <4 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvv_se_e16m2(<8 x i16> %vd, <8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i16.f16.iXLen(iXLen 1, <8 x i16> %vd, <8 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i16.f16.iXLen(iXLen, <8 x i16>, <8 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvv_se_e16m4(<16 x i16> %vd, <16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i16.f16.iXLen(iXLen 1, <16 x i16> %vd, <16 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i16.f16.iXLen(iXLen, <16 x i16>, <16 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvv_se_e16m8(<32 x i16> %vd, <32 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32i16.f16.iXLen(iXLen 1, <32 x i16> %vd, <32 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32i16.f16.iXLen(iXLen, <32 x i16>, <32 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvv_se_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i32.f32.iXLen(iXLen 1, <1 x i32> %vd, <1 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i32.f32.iXLen(iXLen, <1 x i32>, <1 x i32>, float, iXLen)
+
+define void @test_sf_vc_fvv_se_e32m1(<2 x i32> %vd, <2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i32.f32.iXLen(iXLen 1, <2 x i32> %vd, <2 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i32.f32.iXLen(iXLen, <2 x i32>, <2 x i32>, float, iXLen)
+
+define void @test_sf_vc_fvv_se_e32m2(<4 x i32> %vd, <4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i32.f32.iXLen(iXLen 1, <4 x i32> %vd, <4 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i32.f32.iXLen(iXLen, <4 x i32>, <4 x i32>, float, iXLen)
+
+define void @test_sf_vc_fvv_se_e32m4(<8 x i32> %vd, <8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i32.f32.iXLen(iXLen 1, <8 x i32> %vd, <8 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i32.f32.iXLen(iXLen, <8 x i32>, <8 x i32>, float, iXLen)
+
+define void @test_sf_vc_fvv_se_e32m8(<16 x i32> %vd, <16 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i32.f32.iXLen(iXLen 1, <16 x i32> %vd, <16 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i32.f32.iXLen(iXLen, <16 x i32>, <16 x i32>, float, iXLen)
+
+define void @test_sf_vc_fvv_se_e64m1(<1 x i64> %vd, <1 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i64.f64.iXLen(iXLen 1, <1 x i64> %vd, <1 x i64> %vs2, double %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i64.f64.iXLen(iXLen, <1 x i64>, <1 x i64>, double, iXLen)
+
+define void @test_sf_vc_fvv_se_e64m2(<2 x i64> %vd, <2 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i64.f64.iXLen(iXLen 1, <2 x i64> %vd, <2 x i64> %vs2, double %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i64.f64.iXLen(iXLen, <2 x i64>, <2 x i64>, double, iXLen)
+
+define void @test_sf_vc_fvv_se_e64m4(<4 x i64> %vd, <4 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i64.f64.iXLen(iXLen 1, <4 x i64> %vd, <4 x i64> %vs2, double %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i64.f64.iXLen(iXLen, <4 x i64>, <4 x i64>, double, iXLen)
+
+define void @test_sf_vc_fvv_se_e64m8(<8 x i64> %vd, <8 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i64.f64.iXLen(iXLen 1, <8 x i64> %vd, <8 x i64> %vs2, double %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i64.f64.iXLen(iXLen, <8 x i64>, <8 x i64>, double, iXLen)
+
+define <1 x i16> @test_sf_vc_v_fvv_se_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.iXLen.f16.iXLen(iXLen 1, <1 x i16> %vd, <1 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.iXLen.f16.iXLen(iXLen, <1 x i16>, <1 x i16>, half, iXLen)
+
+define <2 x i16> @test_sf_vc_v_fvv_se_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.iXLen.f16.iXLen(iXLen 1, <2 x i16> %vd, <2 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.iXLen.f16.iXLen(iXLen, <2 x i16>, <2 x i16>, half, iXLen)
+
+define <4 x i16> @test_sf_vc_v_fvv_se_e16m1(<4 x i16> %vd, <4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.iXLen.f16.iXLen(iXLen 1, <4 x i16> %vd, <4 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.iXLen.f16.iXLen(iXLen, <4 x i16>, <4 x i16>, half, iXLen)
+
+define <8 x i16> @test_sf_vc_v_fvv_se_e16m2(<8 x i16> %vd, <8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.iXLen.f16.iXLen(iXLen 1, <8 x i16> %vd, <8 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.iXLen.f16.iXLen(iXLen, <8 x i16>, <8 x i16>, half, iXLen)
+
+define <16 x i16> @test_sf_vc_v_fvv_se_e16m4(<16 x i16> %vd, <16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.iXLen.f16.iXLen(iXLen 1, <16 x i16> %vd, <16 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.iXLen.f16.iXLen(iXLen, <16 x i16>, <16 x i16>, half, iXLen)
+
+define <32 x i16> @test_sf_vc_v_fvv_se_e16m8(<32 x i16> %vd, <32 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.iXLen.f16.iXLen(iXLen 1, <32 x i16> %vd, <32 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.iXLen.f16.iXLen(iXLen, <32 x i16>, <32 x i16>, half, iXLen)
+
+define <1 x i32> @test_sf_vc_v_fvv_se_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.iXLen.f32.iXLen(iXLen 1, <1 x i32> %vd, <1 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.iXLen.f32.iXLen(iXLen, <1 x i32>, <1 x i32>, float, iXLen)
+
+define <2 x i32> @test_sf_vc_v_fvv_se_e32m1(<2 x i32> %vd, <2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.iXLen.f32.iXLen(iXLen 1, <2 x i32> %vd, <2 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.iXLen.f32.iXLen(iXLen, <2 x i32>, <2 x i32>, float, iXLen)
+
+define <4 x i32> @test_sf_vc_v_fvv_se_e32m2(<4 x i32> %vd, <4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.iXLen.f32.iXLen(iXLen 1, <4 x i32> %vd, <4 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.iXLen.f32.iXLen(iXLen, <4 x i32>, <4 x i32>, float, iXLen)
+
+define <8 x i32> @test_sf_vc_v_fvv_se_e32m4(<8 x i32> %vd, <8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.iXLen.f32.iXLen(iXLen 1, <8 x i32> %vd, <8 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.iXLen.f32.iXLen(iXLen, <8 x i32>, <8 x i32>, float, iXLen)
+
+define <16 x i32> @test_sf_vc_v_fvv_se_e32m8(<16 x i32> %vd, <16 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.iXLen.f32.iXLen(iXLen 1, <16 x i32> %vd, <16 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.iXLen.f32.iXLen(iXLen, <16 x i32>, <16 x i32>, float, iXLen)
+
+define <1 x i64> @test_sf_vc_v_fvv_se_e64m1(<1 x i64> %vd, <1 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.iXLen.f64.iXLen(iXLen 1, <1 x i64> %vd, <1 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <1 x i64> %0
+}
+
+declare <1 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.iXLen.f64.iXLen(iXLen, <1 x i64>, <1 x i64>, double, iXLen)
+
+define <2 x i64> @test_sf_vc_v_fvv_se_e64m2(<2 x i64> %vd, <2 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.iXLen.f64.iXLen(iXLen 1, <2 x i64> %vd, <2 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.iXLen.f64.iXLen(iXLen, <2 x i64>, <2 x i64>, double, iXLen)
+
+define <4 x i64> @test_sf_vc_v_fvv_se_e64m4(<4 x i64> %vd, <4 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.iXLen.f64.iXLen(iXLen 1, <4 x i64> %vd, <4 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.iXLen.f64.iXLen(iXLen, <4 x i64>, <4 x i64>, double, iXLen)
+
+define <8 x i64> @test_sf_vc_v_fvv_se_e64m8(<8 x i64> %vd, <8 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.iXLen.f64.iXLen(iXLen 1, <8 x i64> %vd, <8 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.iXLen.f64.iXLen(iXLen, <8 x i64>, <8 x i64>, double, iXLen)
+
+define <1 x i16> @test_sf_vc_v_fvv_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.fvv.nxv1i16.iXLen.f16.iXLen(iXLen 1, <1 x i16> %vd, <1 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.fvv.nxv1i16.iXLen.f16.iXLen(iXLen, <1 x i16>, <1 x i16>, half, iXLen)
+
+define <2 x i16> @test_sf_vc_v_fvv_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.fvv.nxv2i16.iXLen.f16.iXLen(iXLen 1, <2 x i16> %vd, <2 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.fvv.nxv2i16.iXLen.f16.iXLen(iXLen, <2 x i16>, <2 x i16>, half, iXLen)
+
+define <4 x i16> @test_sf_vc_v_fvv_e16m1(<4 x i16> %vd, <4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.fvv.nxv4i16.iXLen.f16.iXLen(iXLen 1, <4 x i16> %vd, <4 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.fvv.nxv4i16.iXLen.f16.iXLen(iXLen, <4 x i16>, <4 x i16>, half, iXLen)
+
+define <8 x i16> @test_sf_vc_v_fvv_e16m2(<8 x i16> %vd, <8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.fvv.nxv8i16.iXLen.f16.iXLen(iXLen 1, <8 x i16> %vd, <8 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.fvv.nxv8i16.iXLen.f16.iXLen(iXLen, <8 x i16>, <8 x i16>, half, iXLen)
+
+define <16 x i16> @test_sf_vc_v_fvv_e16m4(<16 x i16> %vd, <16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.fvv.nxv16i16.iXLen.f16.iXLen(iXLen 1, <16 x i16> %vd, <16 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.fvv.nxv16i16.iXLen.f16.iXLen(iXLen, <16 x i16>, <16 x i16>, half, iXLen)
+
+define <32 x i16> @test_sf_vc_v_fvv_e16m8(<32 x i16> %vd, <32 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.fvv.nxv32i16.iXLen.f16.iXLen(iXLen 1, <32 x i16> %vd, <32 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.fvv.nxv32i16.iXLen.f16.iXLen(iXLen, <32 x i16>, <32 x i16>, half, iXLen)
+
+define <1 x i32> @test_sf_vc_v_fvv_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.fvv.nxv1i32.iXLen.f32.iXLen(iXLen 1, <1 x i32> %vd, <1 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.fvv.nxv1i32.iXLen.f32.iXLen(iXLen, <1 x i32>, <1 x i32>, float, iXLen)
+
+define <2 x i32> @test_sf_vc_v_fvv_e32m1(<2 x i32> %vd, <2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.fvv.nxv2i32.iXLen.f32.iXLen(iXLen 1, <2 x i32> %vd, <2 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.fvv.nxv2i32.iXLen.f32.iXLen(iXLen, <2 x i32>, <2 x i32>, float, iXLen)
+
+define <4 x i32> @test_sf_vc_v_fvv_e32m2(<4 x i32> %vd, <4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.fvv.nxv4i32.iXLen.f32.iXLen(iXLen 1, <4 x i32> %vd, <4 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.fvv.nxv4i32.iXLen.f32.iXLen(iXLen, <4 x i32>, <4 x i32>, float, iXLen)
+
+define <8 x i32> @test_sf_vc_v_fvv_e32m4(<8 x i32> %vd, <8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.fvv.nxv8i32.iXLen.f32.iXLen(iXLen 1, <8 x i32> %vd, <8 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.fvv.nxv8i32.iXLen.f32.iXLen(iXLen, <8 x i32>, <8 x i32>, float, iXLen)
+
+define <16 x i32> @test_sf_vc_v_fvv_e32m8(<16 x i32> %vd, <16 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.fvv.nxv16i32.iXLen.f32.iXLen(iXLen 1, <16 x i32> %vd, <16 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.fvv.nxv16i32.iXLen.f32.iXLen(iXLen, <16 x i32>, <16 x i32>, float, iXLen)
+
+define <1 x i64> @test_sf_vc_v_fvv_e64m1(<1 x i64> %vd, <1 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.fvv.nxv1i64.iXLen.f64.iXLen(iXLen 1, <1 x i64> %vd, <1 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <1 x i64> %0
+}
+
+declare <1 x i64> @llvm.riscv.sf.vc.v.fvv.nxv1i64.iXLen.f64.iXLen(iXLen, <1 x i64>, <1 x i64>, double, iXLen)
+
+define <2 x i64> @test_sf_vc_v_fvv_e64m2(<2 x i64> %vd, <2 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.fvv.nxv2i64.iXLen.f64.iXLen(iXLen 1, <2 x i64> %vd, <2 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.riscv.sf.vc.v.fvv.nxv2i64.iXLen.f64.iXLen(iXLen, <2 x i64>, <2 x i64>, double, iXLen)
+
+define <4 x i64> @test_sf_vc_v_fvv_e64m4(<4 x i64> %vd, <4 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.fvv.nxv4i64.iXLen.f64.iXLen(iXLen 1, <4 x i64> %vd, <4 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.riscv.sf.vc.v.fvv.nxv4i64.iXLen.f64.iXLen(iXLen, <4 x i64>, <4 x i64>, double, iXLen)
+
+define <8 x i64> @test_sf_vc_v_fvv_e64m8(<8 x i64> %vd, <8 x i64> %vs2, double %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.fvv.nxv8i64.iXLen.f64.iXLen(iXLen 1, <8 x i64> %vd, <8 x i64> %vs2, double %fs1, iXLen %vl)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.riscv.sf.vc.v.fvv.nxv8i64.iXLen.f64.iXLen(iXLen, <8 x i64>, <8 x i64>, double, iXLen)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvw.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvw.ll
new file mode 100644
index 000000000000000..86257ead512c2ef
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvw.ll
@@ -0,0 +1,2111 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+xsfvcp \
+; RUN: -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+xsfvcp \
+; RUN: -verify-machineinstrs | FileCheck %s
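+
+; These tests exercise the widening VCIX (sf.vc.*vw) intrinsics with
+; fixed-length vector operands, checking that the corresponding sf.vc
+; instructions are selected and that the vsetvli uses the container
+; type implied by each fixed vector length.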
+
+define void @test_sf_vc_vvw_se_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i16.nxv1i8.nxv1i8.iXLen(iXLen 3, <1 x i16> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i16.nxv1i8.nxv1i8.iXLen(iXLen, <1 x i16>, <1 x i8>, <1 x i8>, iXLen)
+
+define void @test_sf_vc_vvw_se_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i16.nxv2i8.nxv2i8.iXLen(iXLen 3, <2 x i16> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i16.nxv2i8.nxv2i8.iXLen(iXLen, <2 x i16>, <2 x i8>, <2 x i8>, iXLen)
+
+define void @test_sf_vc_vvw_se_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i16.nxv4i8.nxv4i8.iXLen(iXLen 3, <4 x i16> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i16.nxv4i8.nxv4i8.iXLen(iXLen, <4 x i16>, <4 x i8>, <4 x i8>, iXLen)
+
+define void @test_sf_vc_vvw_se_e8m1(<8 x i16> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i16.nxv8i8.nxv8i8.iXLen(iXLen 3, <8 x i16> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i16.nxv8i8.nxv8i8.iXLen(iXLen, <8 x i16>, <8 x i8>, <8 x i8>, iXLen)
+
+define void @test_sf_vc_vvw_se_e8m2(<16 x i16> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v10, v11
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i16.nxv16i8.nxv16i8.iXLen(iXLen 3, <16 x i16> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i16.nxv16i8.nxv16i8.iXLen(iXLen, <16 x i16>, <16 x i8>, <16 x i8>, iXLen)
+
+define void @test_sf_vc_vvw_se_e8m4(<32 x i16> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v12, v14
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv32i16.nxv32i8.nxv32i8.iXLen(iXLen 3, <32 x i16> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv32i16.nxv32i8.nxv32i8.iXLen(iXLen, <32 x i16>, <32 x i8>, <32 x i8>, iXLen)
+
+define void @test_sf_vc_vvw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i32.nxv1i16.nxv1i16.iXLen(iXLen 3, <1 x i32> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i32.nxv1i16.nxv1i16.iXLen(iXLen, <1 x i32>, <1 x i16>, <1 x i16>, iXLen)
+
+define void @test_sf_vc_vvw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i32.nxv2i16.nxv2i16.iXLen(iXLen 3, <2 x i32> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i32.nxv2i16.nxv2i16.iXLen(iXLen, <2 x i32>, <2 x i16>, <2 x i16>, iXLen)
+
+define void @test_sf_vc_vvw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i32.nxv4i16.nxv4i16.iXLen(iXLen 3, <4 x i32> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i32.nxv4i16.nxv4i16.iXLen(iXLen, <4 x i32>, <4 x i16>, <4 x i16>, iXLen)
+
+define void @test_sf_vc_vvw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v10, v11
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i32.nxv8i16.nxv8i16.iXLen(iXLen 3, <8 x i32> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i32.nxv8i16.nxv8i16.iXLen(iXLen, <8 x i32>, <8 x i16>, <8 x i16>, iXLen)
+
+define void @test_sf_vc_vvw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v12, v14
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i32.nxv16i16.nxv16i16.iXLen(iXLen 3, <16 x i32> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i32.nxv16i16.nxv16i16.iXLen(iXLen, <16 x i32>, <16 x i16>, <16 x i16>, iXLen)
+
+define void @test_sf_vc_vvw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i64.nxv1i32.nxv1i32.iXLen(iXLen 3, <1 x i64> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i64.nxv1i32.nxv1i32.iXLen(iXLen, <1 x i64>, <1 x i32>, <1 x i32>, iXLen)
+
+define void @test_sf_vc_vvw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i64.nxv2i32.nxv2i32.iXLen(iXLen 3, <2 x i64> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i64.nxv2i32.nxv2i32.iXLen(iXLen, <2 x i64>, <2 x i32>, <2 x i32>, iXLen)
+
+define void @test_sf_vc_vvw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v10, v11
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i64.nxv4i32.nxv4i32.iXLen(iXLen 3, <4 x i64> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i64.nxv4i32.nxv4i32.iXLen(iXLen, <4 x i64>, <4 x i32>, <4 x i32>, iXLen)
+
+define void @test_sf_vc_vvw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_vvw_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.vvw 3, v8, v12, v14
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i64.nxv8i32.nxv8i32.iXLen(iXLen 3, <8 x i64> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i64.nxv8i32.nxv8i32.iXLen(iXLen, <8 x i64>, <8 x i32>, <8 x i32>, iXLen)
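+
+; Note: the value-returning sf.vc.v.vvw forms below differ from the void
+; forms above in that the generated vsetvli uses the tail-undisturbed (tu)
+; policy rather than tail-agnostic (ta).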
+
+define <1 x i16> @test_sf_vc_v_vvw_se_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen 3, <1 x i16> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, <1 x i16>, <1 x i8>, <1 x i8>, iXLen)
+
+define <2 x i16> @test_sf_vc_v_vvw_se_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen 3, <2 x i16> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, <2 x i16>, <2 x i8>, <2 x i8>, iXLen)
+
+define <4 x i16> @test_sf_vc_v_vvw_se_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen 3, <4 x i16> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, <4 x i16>, <4 x i8>, <4 x i8>, iXLen)
+
+define <8 x i16> @test_sf_vc_v_vvw_se_e8m1(<8 x i16> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen 3, <8 x i16> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, <8 x i16>, <8 x i8>, <8 x i8>, iXLen)
+
+define <16 x i16> @test_sf_vc_v_vvw_se_e8m2(<16 x i16> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen 3, <16 x i16> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, <16 x i16>, <16 x i8>, <16 x i8>, iXLen)
+
+define <32 x i16> @test_sf_vc_v_vvw_se_e8m4(<32 x i16> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen 3, <32 x i16> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, <32 x i16>, <32 x i8>, <32 x i8>, iXLen)
+
+define <1 x i32> @test_sf_vc_v_vvw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen 3, <1 x i32> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, <1 x i32>, <1 x i16>, <1 x i16>, iXLen)
+
+define <2 x i32> @test_sf_vc_v_vvw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen 3, <2 x i32> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, <2 x i32>, <2 x i16>, <2 x i16>, iXLen)
+
+define <4 x i32> @test_sf_vc_v_vvw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen 3, <4 x i32> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, <4 x i32>, <4 x i16>, <4 x i16>, iXLen)
+
+define <8 x i32> @test_sf_vc_v_vvw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen 3, <8 x i32> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, <8 x i32>, <8 x i16>, <8 x i16>, iXLen)
+
+define <16 x i32> @test_sf_vc_v_vvw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen 3, <16 x i32> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, <16 x i32>, <16 x i16>, <16 x i16>, iXLen)
+
+define <1 x i64> @test_sf_vc_v_vvw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen 3, <1 x i64> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl)
+ ret <1 x i64> %0
+}
+
+declare <1 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, <1 x i64>, <1 x i32>, <1 x i32>, iXLen)
+
+define <2 x i64> @test_sf_vc_v_vvw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen 3, <2 x i64> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, <2 x i64>, <2 x i32>, <2 x i32>, iXLen)
+
+define <4 x i64> @test_sf_vc_v_vvw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen 3, <4 x i64> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, <4 x i64>, <4 x i32>, <4 x i32>, iXLen)
+
+define <8 x i64> @test_sf_vc_v_vvw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen 3, <8 x i64> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, <8 x i64>, <8 x i32>, <8 x i32>, iXLen)
+
+define <1 x i16> @test_sf_vc_v_vvw_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.vvw.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen 3, <1 x i16> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.vvw.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, <1 x i16>, <1 x i8>, <1 x i8>, iXLen)
+
+define <2 x i16> @test_sf_vc_v_vvw_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.vvw.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen 3, <2 x i16> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.vvw.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, <2 x i16>, <2 x i8>, <2 x i8>, iXLen)
+
+define <4 x i16> @test_sf_vc_v_vvw_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.vvw.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen 3, <4 x i16> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.vvw.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, <4 x i16>, <4 x i8>, <4 x i8>, iXLen)
+
+define <8 x i16> @test_sf_vc_v_vvw_e8m1(<8 x i16> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.vvw.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen 3, <8 x i16> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.vvw.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, <8 x i16>, <8 x i8>, <8 x i8>, iXLen)
+
+define <16 x i16> @test_sf_vc_v_vvw_e8m2(<16 x i16> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.vvw.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen 3, <16 x i16> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.vvw.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, <16 x i16>, <16 x i8>, <16 x i8>, iXLen)
+
+define <32 x i16> @test_sf_vc_v_vvw_e8m4(<32 x i16> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.vvw.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen 3, <32 x i16> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.vvw.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, <32 x i16>, <32 x i8>, <32 x i8>, iXLen)
+
+define <1 x i32> @test_sf_vc_v_vvw_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.vvw.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen 3, <1 x i32> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.vvw.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, <1 x i32>, <1 x i16>, <1 x i16>, iXLen)
+
+define <2 x i32> @test_sf_vc_v_vvw_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.vvw.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen 3, <2 x i32> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.vvw.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, <2 x i32>, <2 x i16>, <2 x i16>, iXLen)
+
+define <4 x i32> @test_sf_vc_v_vvw_e16m1(<4 x i32> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.vvw.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen 3, <4 x i32> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.vvw.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, <4 x i32>, <4 x i16>, <4 x i16>, iXLen)
+
+define <8 x i32> @test_sf_vc_v_vvw_e16m2(<8 x i32> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.vvw.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen 3, <8 x i32> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.vvw.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, <8 x i32>, <8 x i16>, <8 x i16>, iXLen)
+
+define <16 x i32> @test_sf_vc_v_vvw_e16m4(<16 x i32> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.vvw.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen 3, <16 x i32> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.vvw.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, <16 x i32>, <16 x i16>, <16 x i16>, iXLen)
+
+define <1 x i64> @test_sf_vc_v_vvw_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.vvw.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen 3, <1 x i64> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl)
+ ret <1 x i64> %0
+}
+
+declare <1 x i64> @llvm.riscv.sf.vc.v.vvw.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, <1 x i64>, <1 x i32>, <1 x i32>, iXLen)
+
+define <2 x i64> @test_sf_vc_v_vvw_e32m1(<2 x i64> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.vvw.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen 3, <2 x i64> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.riscv.sf.vc.v.vvw.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, <2 x i64>, <2 x i32>, <2 x i32>, iXLen)
+
+define <4 x i64> @test_sf_vc_v_vvw_e32m2(<4 x i64> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.vvw.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen 3, <4 x i64> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.riscv.sf.vc.v.vvw.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, <4 x i64>, <4 x i32>, <4 x i32>, iXLen)
+
+define <8 x i64> @test_sf_vc_v_vvw_e32m4(<8 x i64> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_vvw_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.vvw.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen 3, <8 x i64> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.riscv.sf.vc.v.vvw.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, <8 x i64>, <8 x i32>, <8 x i32>, iXLen)
+
+define void @test_sf_vc_xvw_se_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i16.nxv1i8.i8.iXLen(iXLen 3, <1 x i16> %vd, <1 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i16.nxv1i8.i8.iXLen(iXLen, <1 x i16>, <1 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvw_se_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i16.nxv2i8.i8.iXLen(iXLen 3, <2 x i16> %vd, <2 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i16.nxv2i8.i8.iXLen(iXLen, <2 x i16>, <2 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvw_se_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i16.nxv4i8.i8.iXLen(iXLen 3, <4 x i16> %vd, <4 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i16.nxv4i8.i8.iXLen(iXLen, <4 x i16>, <4 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvw_se_e8m1(<8 x i16> %vd, <8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i16.nxv8i8.i8.iXLen(iXLen 3, <8 x i16> %vd, <8 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i16.nxv8i8.i8.iXLen(iXLen, <8 x i16>, <8 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvw_se_e8m2(<16 x i16> %vd, <16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i16.nxv16i8.i8.iXLen(iXLen 3, <16 x i16> %vd, <16 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i16.nxv16i8.i8.iXLen(iXLen, <16 x i16>, <16 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvw_se_e8m4(<32 x i16> %vd, <32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv32i16.nxv32i8.i8.iXLen(iXLen 3, <32 x i16> %vd, <32 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv32i16.nxv32i8.i8.iXLen(iXLen, <32 x i16>, <32 x i8>, i8, iXLen)
+
+define void @test_sf_vc_xvw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i32.nxv1i16.i16.iXLen(iXLen 3, <1 x i32> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i32.nxv1i16.i16.iXLen(iXLen, <1 x i32>, <1 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i32.nxv2i16.i16.iXLen(iXLen 3, <2 x i32> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i32.nxv2i16.i16.iXLen(iXLen, <2 x i32>, <2 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i32.nxv4i16.i16.iXLen(iXLen 3, <4 x i32> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i32.nxv4i16.i16.iXLen(iXLen, <4 x i32>, <4 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i32.nxv8i16.i16.iXLen(iXLen 3, <8 x i32> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i32.nxv8i16.i16.iXLen(iXLen, <8 x i32>, <8 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i32.nxv16i16.i16.iXLen(iXLen 3, <16 x i32> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i32.nxv16i16.i16.iXLen(iXLen, <16 x i32>, <16 x i16>, i16, iXLen)
+
+define void @test_sf_vc_xvw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i64.nxv1i32.i32.iXLen(iXLen 3, <1 x i64> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i64.nxv1i32.i32.iXLen(iXLen, <1 x i64>, <1 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xvw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i64.nxv2i32.i32.iXLen(iXLen 3, <2 x i64> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i64.nxv2i32.i32.iXLen(iXLen, <2 x i64>, <2 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xvw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i64.nxv4i32.i32.iXLen(iXLen 3, <4 x i64> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i64.nxv4i32.i32.iXLen(iXLen, <4 x i64>, <4 x i32>, i32, iXLen)
+
+define void @test_sf_vc_xvw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_xvw_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i64.nxv8i32.i32.iXLen(iXLen 3, <8 x i64> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i64.nxv8i32.i32.iXLen(iXLen, <8 x i64>, <8 x i32>, i32, iXLen)
+
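+; Tests for sf.vc.v.xvw.se, which also returns the destination vector: the
+; result is converted back from its scalable container, and the tu policy
+; keeps the tail of the vd operand undisturbed.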
+define <1 x i16> @test_sf_vc_v_xvw_se_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen 3, <1 x i16> %vd, <1 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen, <1 x i16>, <1 x i8>, i8, iXLen)
+
+define <2 x i16> @test_sf_vc_v_xvw_se_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen 3, <2 x i16> %vd, <2 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen, <2 x i16>, <2 x i8>, i8, iXLen)
+
+define <4 x i16> @test_sf_vc_v_xvw_se_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen 3, <4 x i16> %vd, <4 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen, <4 x i16>, <4 x i8>, i8, iXLen)
+
+define <8 x i16> @test_sf_vc_v_xvw_se_e8m1(<8 x i16> %vd, <8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen 3, <8 x i16> %vd, <8 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen, <8 x i16>, <8 x i8>, i8, iXLen)
+
+define <16 x i16> @test_sf_vc_v_xvw_se_e8m2(<16 x i16> %vd, <16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen 3, <16 x i16> %vd, <16 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen, <16 x i16>, <16 x i8>, i8, iXLen)
+
+define <32 x i16> @test_sf_vc_v_xvw_se_e8m4(<32 x i16> %vd, <32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen 3, <32 x i16> %vd, <32 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen, <32 x i16>, <32 x i8>, i8, iXLen)
+
+define <1 x i32> @test_sf_vc_v_xvw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen 3, <1 x i32> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen, <1 x i32>, <1 x i16>, i16, iXLen)
+
+define <2 x i32> @test_sf_vc_v_xvw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen 3, <2 x i32> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen, <2 x i32>, <2 x i16>, i16, iXLen)
+
+define <4 x i32> @test_sf_vc_v_xvw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen 3, <4 x i32> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen, <4 x i32>, <4 x i16>, i16, iXLen)
+
+define <8 x i32> @test_sf_vc_v_xvw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen 3, <8 x i32> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen, <8 x i32>, <8 x i16>, i16, iXLen)
+
+define <16 x i32> @test_sf_vc_v_xvw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen 3, <16 x i32> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen, <16 x i32>, <16 x i16>, i16, iXLen)
+
+define <1 x i64> @test_sf_vc_v_xvw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.iXLen.nxv1i32.i32.iXLen(iXLen 3, <1 x i64> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <1 x i64> %0
+}
+
+declare <1 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.iXLen.nxv1i32.i32.iXLen(iXLen, <1 x i64>, <1 x i32>, i32, iXLen)
+
+define <2 x i64> @test_sf_vc_v_xvw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.iXLen.nxv2i32.i32.iXLen(iXLen 3, <2 x i64> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.iXLen.nxv2i32.i32.iXLen(iXLen, <2 x i64>, <2 x i32>, i32, iXLen)
+
+define <4 x i64> @test_sf_vc_v_xvw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.iXLen.nxv4i32.i32.iXLen(iXLen 3, <4 x i64> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.iXLen.nxv4i32.i32.iXLen(iXLen, <4 x i64>, <4 x i32>, i32, iXLen)
+
+define <8 x i64> @test_sf_vc_v_xvw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.iXLen.nxv8i32.i32.iXLen(iXLen 3, <8 x i64> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.iXLen.nxv8i32.i32.iXLen(iXLen, <8 x i64>, <8 x i32>, i32, iXLen)
+
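+; Tests for the non-.se sf.vc.v.xvw form; the lowering matches the .se
+; variant, but the intrinsic is not modeled as having side effects.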
+define <1 x i16> @test_sf_vc_v_xvw_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.xvw.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen 3, <1 x i16> %vd, <1 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.xvw.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen, <1 x i16>, <1 x i8>, i8, iXLen)
+
+define <2 x i16> @test_sf_vc_v_xvw_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.xvw.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen 3, <2 x i16> %vd, <2 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.xvw.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen, <2 x i16>, <2 x i8>, i8, iXLen)
+
+define <4 x i16> @test_sf_vc_v_xvw_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.xvw.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen 3, <4 x i16> %vd, <4 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.xvw.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen, <4 x i16>, <4 x i8>, i8, iXLen)
+
+define <8 x i16> @test_sf_vc_v_xvw_e8m1(<8 x i16> %vd, <8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.xvw.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen 3, <8 x i16> %vd, <8 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.xvw.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen, <8 x i16>, <8 x i8>, i8, iXLen)
+
+define <16 x i16> @test_sf_vc_v_xvw_e8m2(<16 x i16> %vd, <16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.xvw.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen 3, <16 x i16> %vd, <16 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.xvw.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen, <16 x i16>, <16 x i8>, i8, iXLen)
+
+define <32 x i16> @test_sf_vc_v_xvw_e8m4(<32 x i16> %vd, <32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.xvw.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen 3, <32 x i16> %vd, <32 x i8> %vs2, i8 %rs1, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.xvw.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen, <32 x i16>, <32 x i8>, i8, iXLen)
+
+define <1 x i32> @test_sf_vc_v_xvw_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.xvw.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen 3, <1 x i32> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.xvw.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen, <1 x i32>, <1 x i16>, i16, iXLen)
+
+define <2 x i32> @test_sf_vc_v_xvw_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.xvw.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen 3, <2 x i32> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.xvw.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen, <2 x i32>, <2 x i16>, i16, iXLen)
+
+define <4 x i32> @test_sf_vc_v_xvw_e16m1(<4 x i32> %vd, <4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.xvw.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen 3, <4 x i32> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.xvw.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen, <4 x i32>, <4 x i16>, i16, iXLen)
+
+define <8 x i32> @test_sf_vc_v_xvw_e16m2(<8 x i32> %vd, <8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.xvw.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen 3, <8 x i32> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.xvw.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen, <8 x i32>, <8 x i16>, i16, iXLen)
+
+define <16 x i32> @test_sf_vc_v_xvw_e16m4(<16 x i32> %vd, <16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.xvw.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen 3, <16 x i32> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.xvw.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen, <16 x i32>, <16 x i16>, i16, iXLen)
+
+define <1 x i64> @test_sf_vc_v_xvw_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.xvw.nxv1i64.iXLen.nxv1i32.i32.iXLen(iXLen 3, <1 x i64> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <1 x i64> %0
+}
+
+declare <1 x i64> @llvm.riscv.sf.vc.v.xvw.nxv1i64.iXLen.nxv1i32.i32.iXLen(iXLen, <1 x i64>, <1 x i32>, i32, iXLen)
+
+define <2 x i64> @test_sf_vc_v_xvw_e32m1(<2 x i64> %vd, <2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.xvw.nxv2i64.iXLen.nxv2i32.i32.iXLen(iXLen 3, <2 x i64> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.riscv.sf.vc.v.xvw.nxv2i64.iXLen.nxv2i32.i32.iXLen(iXLen, <2 x i64>, <2 x i32>, i32, iXLen)
+
+define <4 x i64> @test_sf_vc_v_xvw_e32m2(<4 x i64> %vd, <4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.xvw.nxv4i64.iXLen.nxv4i32.i32.iXLen(iXLen 3, <4 x i64> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.riscv.sf.vc.v.xvw.nxv4i64.iXLen.nxv4i32.i32.iXLen(iXLen, <4 x i64>, <4 x i32>, i32, iXLen)
+
+define <8 x i64> @test_sf_vc_v_xvw_e32m4(<8 x i64> %vd, <8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_xvw_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.xvw.nxv8i64.iXLen.nxv8i32.i32.iXLen(iXLen 3, <8 x i64> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.riscv.sf.vc.v.xvw.nxv8i64.iXLen.nxv8i32.i32.iXLen(iXLen, <8 x i64>, <8 x i32>, i32, iXLen)
+
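+; Tests for sf.vc.ivw.se, where the scalar operand is the immediate 10
+; encoded in the instruction, so vl arrives in a0 rather than a1.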
+define void @test_sf_vc_ivw_se_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i16.nxv1i8.iXLen.iXLen(iXLen 3, <1 x i16> %vd, <1 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i16.nxv1i8.iXLen.iXLen(iXLen, <1 x i16>, <1 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i16.nxv2i8.iXLen.iXLen(iXLen 3, <2 x i16> %vd, <2 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i16.nxv2i8.iXLen.iXLen(iXLen, <2 x i16>, <2 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i16.nxv4i8.iXLen.iXLen(iXLen 3, <4 x i16> %vd, <4 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i16.nxv4i8.iXLen.iXLen(iXLen, <4 x i16>, <4 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e8m1(<8 x i16> %vd, <8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i16.nxv8i8.iXLen.iXLen(iXLen 3, <8 x i16> %vd, <8 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i16.nxv8i8.iXLen.iXLen(iXLen, <8 x i16>, <8 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e8m2(<16 x i16> %vd, <16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i16.nxv16i8.iXLen.iXLen(iXLen 3, <16 x i16> %vd, <16 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i16.nxv16i8.iXLen.iXLen(iXLen, <16 x i16>, <16 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e8m4(<32 x i16> %vd, <32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv32i16.nxv32i8.iXLen.iXLen(iXLen 3, <32 x i16> %vd, <32 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv32i16.nxv32i8.iXLen.iXLen(iXLen, <32 x i16>, <32 x i8>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i32.nxv1i16.iXLen.iXLen(iXLen 3, <1 x i32> %vd, <1 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i32.nxv1i16.iXLen.iXLen(iXLen, <1 x i32>, <1 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i32.nxv2i16.iXLen.iXLen(iXLen 3, <2 x i32> %vd, <2 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i32.nxv2i16.iXLen.iXLen(iXLen, <2 x i32>, <2 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i32.nxv4i16.iXLen.iXLen(iXLen 3, <4 x i32> %vd, <4 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i32.nxv4i16.iXLen.iXLen(iXLen, <4 x i32>, <4 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i32.nxv8i16.iXLen.iXLen(iXLen 3, <8 x i32> %vd, <8 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i32.nxv8i16.iXLen.iXLen(iXLen, <8 x i32>, <8 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i32.nxv16i16.iXLen.iXLen(iXLen 3, <16 x i32> %vd, <16 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i32.nxv16i16.iXLen.iXLen(iXLen, <16 x i32>, <16 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i64.nxv1i32.iXLen.iXLen(iXLen 3, <1 x i64> %vd, <1 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i64.nxv1i32.iXLen.iXLen(iXLen, <1 x i64>, <1 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i64.nxv2i32.iXLen.iXLen(iXLen 3, <2 x i64> %vd, <2 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i64.nxv2i32.iXLen.iXLen(iXLen, <2 x i64>, <2 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i64.nxv4i32.iXLen.iXLen(iXLen 3, <4 x i64> %vd, <4 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i64.nxv4i32.iXLen.iXLen(iXLen, <4 x i64>, <4 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_ivw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_ivw_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i64.nxv8i32.iXLen.iXLen(iXLen 3, <8 x i64> %vd, <8 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i64.nxv8i32.iXLen.iXLen(iXLen, <8 x i64>, <8 x i32>, iXLen, iXLen)
+
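+; Tests for sf.vc.v.ivw.se: the immediate form with a vector result, lowered
+; with the tu policy like the other value-returning variants.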
+define <1 x i16> @test_sf_vc_v_ivw_se_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen 3, <1 x i16> %vd, <1 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen, <1 x i16>, <1 x i8>, iXLen, iXLen)
+
+define <2 x i16> @test_sf_vc_v_ivw_se_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen 3, <2 x i16> %vd, <2 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen, <2 x i16>, <2 x i8>, iXLen, iXLen)
+
+define <4 x i16> @test_sf_vc_v_ivw_se_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen 3, <4 x i16> %vd, <4 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen, <4 x i16>, <4 x i8>, iXLen, iXLen)
+
+define <8 x i16> @test_sf_vc_v_ivw_se_e8m1(<8 x i16> %vd, <8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen 3, <8 x i16> %vd, <8 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen, <8 x i16>, <8 x i8>, iXLen, iXLen)
+
+define <16 x i16> @test_sf_vc_v_ivw_se_e8m2(<16 x i16> %vd, <16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen 3, <16 x i16> %vd, <16 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen, <16 x i16>, <16 x i8>, iXLen, iXLen)
+
+define <32 x i16> @test_sf_vc_v_ivw_se_e8m4(<32 x i16> %vd, <32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen 3, <32 x i16> %vd, <32 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen, <32 x i16>, <32 x i8>, iXLen, iXLen)
+
+define <1 x i32> @test_sf_vc_v_ivw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen 3, <1 x i32> %vd, <1 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen, <1 x i32>, <1 x i16>, iXLen, iXLen)
+
+define <2 x i32> @test_sf_vc_v_ivw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen 3, <2 x i32> %vd, <2 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen, <2 x i32>, <2 x i16>, iXLen, iXLen)
+
+define <4 x i32> @test_sf_vc_v_ivw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen 3, <4 x i32> %vd, <4 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen, <4 x i32>, <4 x i16>, iXLen, iXLen)
+
+define <8 x i32> @test_sf_vc_v_ivw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen 3, <8 x i32> %vd, <8 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen, <8 x i32>, <8 x i16>, iXLen, iXLen)
+
+define <16 x i32> @test_sf_vc_v_ivw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen 3, <16 x i32> %vd, <16 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen, <16 x i32>, <16 x i16>, iXLen, iXLen)
+
+define <1 x i64> @test_sf_vc_v_ivw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen 3, <1 x i64> %vd, <1 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <1 x i64> %0
+}
+
+declare <1 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen, <1 x i64>, <1 x i32>, iXLen, iXLen)
+
+define <2 x i64> @test_sf_vc_v_ivw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen 3, <2 x i64> %vd, <2 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen, <2 x i64>, <2 x i32>, iXLen, iXLen)
+
+define <4 x i64> @test_sf_vc_v_ivw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen 3, <4 x i64> %vd, <4 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen, <4 x i64>, <4 x i32>, iXLen, iXLen)
+
+define <8 x i64> @test_sf_vc_v_ivw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen 3, <8 x i64> %vd, <8 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen, <8 x i64>, <8 x i32>, iXLen, iXLen)
+
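+; Tests for the non-.se sf.vc.v.ivw form; the generated code matches the .se
+; variant above.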
+define <1 x i16> @test_sf_vc_v_ivw_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e8mf8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.ivw.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen 3, <1 x i16> %vd, <1 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <1 x i16> %0
+}
+
+declare <1 x i16> @llvm.riscv.sf.vc.v.ivw.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen, <1 x i16>, <1 x i8>, iXLen, iXLen)
+
+define <2 x i16> @test_sf_vc_v_ivw_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e8mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.ivw.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen 3, <2 x i16> %vd, <2 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <2 x i16> %0
+}
+
+declare <2 x i16> @llvm.riscv.sf.vc.v.ivw.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen, <2 x i16>, <2 x i8>, iXLen, iXLen)
+
+define <4 x i16> @test_sf_vc_v_ivw_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e8mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.ivw.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen 3, <4 x i16> %vd, <4 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <4 x i16> %0
+}
+
+declare <4 x i16> @llvm.riscv.sf.vc.v.ivw.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen, <4 x i16>, <4 x i8>, iXLen, iXLen)
+
+define <8 x i16> @test_sf_vc_v_ivw_e8m1(<8 x i16> %vd, <8 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e8m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.ivw.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen 3, <8 x i16> %vd, <8 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <8 x i16> %0
+}
+
+declare <8 x i16> @llvm.riscv.sf.vc.v.ivw.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen, <8 x i16>, <8 x i8>, iXLen, iXLen)
+
+define <16 x i16> @test_sf_vc_v_ivw_e8m2(<16 x i16> %vd, <16 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e8m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.ivw.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen 3, <16 x i16> %vd, <16 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <16 x i16> %0
+}
+
+declare <16 x i16> @llvm.riscv.sf.vc.v.ivw.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen, <16 x i16>, <16 x i8>, iXLen, iXLen)
+
+define <32 x i16> @test_sf_vc_v_ivw_e8m4(<32 x i16> %vd, <32 x i8> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e8m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.ivw.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen 3, <32 x i16> %vd, <32 x i8> %vs2, iXLen 10, iXLen %vl)
+ ret <32 x i16> %0
+}
+
+declare <32 x i16> @llvm.riscv.sf.vc.v.ivw.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen, <32 x i16>, <32 x i8>, iXLen, iXLen)
+
+define <1 x i32> @test_sf_vc_v_ivw_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.ivw.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen 3, <1 x i32> %vd, <1 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.ivw.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen, <1 x i32>, <1 x i16>, iXLen, iXLen)
+
+define <2 x i32> @test_sf_vc_v_ivw_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.ivw.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen 3, <2 x i32> %vd, <2 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.ivw.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen, <2 x i32>, <2 x i16>, iXLen, iXLen)
+
+define <4 x i32> @test_sf_vc_v_ivw_e16m1(<4 x i32> %vd, <4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.ivw.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen 3, <4 x i32> %vd, <4 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.ivw.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen, <4 x i32>, <4 x i16>, iXLen, iXLen)
+
+define <8 x i32> @test_sf_vc_v_ivw_e16m2(<8 x i32> %vd, <8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.ivw.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen 3, <8 x i32> %vd, <8 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.ivw.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen, <8 x i32>, <8 x i16>, iXLen, iXLen)
+
+define <16 x i32> @test_sf_vc_v_ivw_e16m4(<16 x i32> %vd, <16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.ivw.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen 3, <16 x i32> %vd, <16 x i16> %vs2, iXLen 10, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.ivw.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen, <16 x i32>, <16 x i16>, iXLen, iXLen)
+
+define <1 x i64> @test_sf_vc_v_ivw_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.ivw.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen 3, <1 x i64> %vd, <1 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <1 x i64> %0
+}
+
+declare <1 x i64> @llvm.riscv.sf.vc.v.ivw.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen, <1 x i64>, <1 x i32>, iXLen, iXLen)
+
+define <2 x i64> @test_sf_vc_v_ivw_e32m1(<2 x i64> %vd, <2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.ivw.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen 3, <2 x i64> %vd, <2 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.riscv.sf.vc.v.ivw.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen, <2 x i64>, <2 x i32>, iXLen, iXLen)
+
+define <4 x i64> @test_sf_vc_v_ivw_e32m2(<4 x i64> %vd, <4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.ivw.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen 3, <4 x i64> %vd, <4 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.riscv.sf.vc.v.ivw.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen, <4 x i64>, <4 x i32>, iXLen, iXLen)
+
+define <8 x i64> @test_sf_vc_v_ivw_e32m4(<8 x i64> %vd, <8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_ivw_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.ivw.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen 3, <8 x i64> %vd, <8 x i32> %vs2, iXLen 10, iXLen %vl)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.riscv.sf.vc.v.ivw.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen, <8 x i64>, <8 x i32>, iXLen, iXLen)
+
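+; Tests for sf.vc.fvw.se: the scalar is a floating-point value passed in fa0,
+; and these FP forms use opcode 1 rather than 3.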
+define void @test_sf_vc_fvw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1i32.nxv1i16.f16.iXLen(iXLen 1, <1 x i32> %vd, <1 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1i32.nxv1i16.f16.iXLen(iXLen, <1 x i32>, <1 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2i32.nxv2i16.f16.iXLen(iXLen 1, <2 x i32> %vd, <2 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2i32.nxv2i16.f16.iXLen(iXLen, <2 x i32>, <2 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4i32.nxv4i16.f16.iXLen(iXLen 1, <4 x i32> %vd, <4 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4i32.nxv4i16.f16.iXLen(iXLen, <4 x i32>, <4 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.fvw 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8i32.nxv8i16.f16.iXLen(iXLen 1, <8 x i32> %vd, <8 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8i32.nxv8i16.f16.iXLen(iXLen, <8 x i32>, <8 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.fvw 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16i32.nxv16i16.f16.iXLen(iXLen 1, <16 x i32> %vd, <16 x i16> %vs2, half %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16i32.nxv16i16.f16.iXLen(iXLen, <16 x i32>, <16 x i16>, half, iXLen)
+
+define void @test_sf_vc_fvw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1i64.nxv1i32.f32.iXLen(iXLen 1, <1 x i64> %vd, <1 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1i64.nxv1i32.f32.iXLen(iXLen, <1 x i64>, <1 x i32>, float, iXLen)
+
+define void @test_sf_vc_fvw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2i64.nxv2i32.f32.iXLen(iXLen 1, <2 x i64> %vd, <2 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2i64.nxv2i32.f32.iXLen(iXLen, <2 x i64>, <2 x i32>, float, iXLen)
+
+define void @test_sf_vc_fvw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.fvw 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4i64.nxv4i32.f32.iXLen(iXLen 1, <4 x i64> %vd, <4 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4i64.nxv4i32.f32.iXLen(iXLen, <4 x i64>, <4 x i32>, float, iXLen)
+
+define void @test_sf_vc_fvw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvw_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.fvw 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8i64.nxv8i32.f32.iXLen(iXLen 1, <8 x i64> %vd, <8 x i32> %vs2, float %fs1, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8i64.nxv8i32.f32.iXLen(iXLen, <8 x i64>, <8 x i32>, float, iXLen)
+
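+; Tests for sf.vc.v.fvw.se: the FP-scalar form with a vector result (tu
+; policy).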
+define <1 x i32> @test_sf_vc_v_fvw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.iXLen.nxv1i16.f16.iXLen(iXLen 1, <1 x i32> %vd, <1 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.iXLen.nxv1i16.f16.iXLen(iXLen, <1 x i32>, <1 x i16>, half, iXLen)
+
+define <2 x i32> @test_sf_vc_v_fvw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.iXLen.nxv2i16.f16.iXLen(iXLen 1, <2 x i32> %vd, <2 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.iXLen.nxv2i16.f16.iXLen(iXLen, <2 x i32>, <2 x i16>, half, iXLen)
+
+define <4 x i32> @test_sf_vc_v_fvw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.iXLen.nxv4i16.f16.iXLen(iXLen 1, <4 x i32> %vd, <4 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.iXLen.nxv4i16.f16.iXLen(iXLen, <4 x i32>, <4 x i16>, half, iXLen)
+
+define <8 x i32> @test_sf_vc_v_fvw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.iXLen.nxv8i16.f16.iXLen(iXLen 1, <8 x i32> %vd, <8 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.iXLen.nxv8i16.f16.iXLen(iXLen, <8 x i32>, <8 x i16>, half, iXLen)
+
+define <16 x i32> @test_sf_vc_v_fvw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.iXLen.nxv16i16.f16.iXLen(iXLen 1, <16 x i32> %vd, <16 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.iXLen.nxv16i16.f16.iXLen(iXLen, <16 x i32>, <16 x i16>, half, iXLen)
+
+define <1 x i64> @test_sf_vc_v_fvw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.iXLen.nxv1i32.f32.iXLen(iXLen 1, <1 x i64> %vd, <1 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <1 x i64> %0
+}
+
+declare <1 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.iXLen.nxv1i32.f32.iXLen(iXLen, <1 x i64>, <1 x i32>, float, iXLen)
+
+define <2 x i64> @test_sf_vc_v_fvw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv2i64.iXLen.nxv2i32.f32.iXLen(iXLen 1, <2 x i64> %vd, <2 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv2i64.iXLen.nxv2i32.f32.iXLen(iXLen, <2 x i64>, <2 x i32>, float, iXLen)
+
+define <4 x i64> @test_sf_vc_v_fvw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.iXLen.nxv4i32.f32.iXLen(iXLen 1, <4 x i64> %vd, <4 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.iXLen.nxv4i32.f32.iXLen(iXLen, <4 x i64>, <4 x i32>, float, iXLen)
+
+define <8 x i64> @test_sf_vc_v_fvw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen 1, <8 x i64> %vd, <8 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen, <8 x i64>, <8 x i32>, float, iXLen)
+
+define <1 x i32> @test_sf_vc_v_fvw_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e16mf4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.fvw.nxv1i32.iXLen.nxv1i16.f16.iXLen(iXLen 1, <1 x i32> %vd, <1 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <1 x i32> %0
+}
+
+declare <1 x i32> @llvm.riscv.sf.vc.v.fvw.nxv1i32.iXLen.nxv1i16.f16.iXLen(iXLen, <1 x i32>, <1 x i16>, half, iXLen)
+
+define <2 x i32> @test_sf_vc_v_fvw_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.fvw.nxv2i32.iXLen.nxv2i16.f16.iXLen(iXLen 1, <2 x i32> %vd, <2 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <2 x i32> %0
+}
+
+declare <2 x i32> @llvm.riscv.sf.vc.v.fvw.nxv2i32.iXLen.nxv2i16.f16.iXLen(iXLen, <2 x i32>, <2 x i16>, half, iXLen)
+
+define <4 x i32> @test_sf_vc_v_fvw_e16m1(<4 x i32> %vd, <4 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.fvw.nxv4i32.iXLen.nxv4i16.f16.iXLen(iXLen 1, <4 x i32> %vd, <4 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <4 x i32> %0
+}
+
+declare <4 x i32> @llvm.riscv.sf.vc.v.fvw.nxv4i32.iXLen.nxv4i16.f16.iXLen(iXLen, <4 x i32>, <4 x i16>, half, iXLen)
+
+define <8 x i32> @test_sf_vc_v_fvw_e16m2(<8 x i32> %vd, <8 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.fvw.nxv8i32.iXLen.nxv8i16.f16.iXLen(iXLen 1, <8 x i32> %vd, <8 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <8 x i32> %0
+}
+
+declare <8 x i32> @llvm.riscv.sf.vc.v.fvw.nxv8i32.iXLen.nxv8i16.f16.iXLen(iXLen, <8 x i32>, <8 x i16>, half, iXLen)
+
+define <16 x i32> @test_sf_vc_v_fvw_e16m4(<16 x i32> %vd, <16 x i16> %vs2, half %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.fvw.nxv16i32.iXLen.nxv16i16.f16.iXLen(iXLen 1, <16 x i32> %vd, <16 x i16> %vs2, half %fs1, iXLen %vl)
+ ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.riscv.sf.vc.v.fvw.nxv16i32.iXLen.nxv16i16.f16.iXLen(iXLen, <16 x i32>, <16 x i16>, half, iXLen)
+
+define <1 x i64> @test_sf_vc_v_fvw_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.fvw.nxv1i64.iXLen.nxv1i32.f32.iXLen(iXLen 1, <1 x i64> %vd, <1 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <1 x i64> %0
+}
+
+declare <1 x i64> @llvm.riscv.sf.vc.v.fvw.nxv1i64.iXLen.nxv1i32.f32.iXLen(iXLen, <1 x i64>, <1 x i32>, float, iXLen)
+
+define <2 x i64> @test_sf_vc_v_fvw_e32m1(<2 x i64> %vd, <2 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.fvw.nxv2i64.iXLen.nxv2i32.f32.iXLen(iXLen 1, <2 x i64> %vd, <2 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.riscv.sf.vc.v.fvw.nxv2i64.iXLen.nxv2i32.f32.iXLen(iXLen, <2 x i64>, <2 x i32>, float, iXLen)
+
+define <4 x i64> @test_sf_vc_v_fvw_e32m2(<4 x i64> %vd, <4 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.fvw.nxv4i64.iXLen.nxv4i32.f32.iXLen(iXLen 1, <4 x i64> %vd, <4 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.riscv.sf.vc.v.fvw.nxv4i64.iXLen.nxv4i32.f32.iXLen(iXLen, <4 x i64>, <4 x i32>, float, iXLen)
+
+define <8 x i64> @test_sf_vc_v_fvw_e32m4(<8 x i64> %vd, <8 x i32> %vs2, float %fs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvw_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen 1, <8 x i64> %vd, <8 x i32> %vs2, float %fs1, iXLen %vl)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen, <8 x i64>, <8 x i32>, float, iXLen)
>From 97e649bef893c04d7b786367025b577995e2b56b Mon Sep 17 00:00:00 2001
From: Brandon Wu <brandon.wu at sifive.com>
Date: Mon, 25 Sep 2023 20:19:16 -0700
Subject: [PATCH 2/2] fixup! [RISCV] Support VLS for VCIX
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 5fdfb7b6bf9a264..d7052e16b833583 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8234,11 +8234,12 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
     SDLoc DL(Op);
     MVT RetContainerVT = getContainerForFixedLengthVector(VT);
+    SDVTList VTs = DAG.getVTList({RetContainerVT, MVT::Other});
     SDValue ScalableVector =
-        DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, RetContainerVT, Ops);
+        DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops);
     SDValue FixedVector =
         convertFromScalableVector(VT, ScalableVector, DAG, Subtarget);
-    return DAG.getMergeValues({FixedVector, Op.getOperand(0)}, DL);
+    return DAG.getMergeValues({FixedVector, ScalableVector.getValue(1)}, DL);
   }
 }
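
To make the intent of the fixup explicit, here is a minimal annotated sketch of the corrected lowering tail, assuming the surrounding LowerINTRINSIC_W_CHAIN case and the existing convertFromScalableVector helper; it illustrates the chain handling rather than reproducing the upstream code verbatim:

// A chained intrinsic node produces two results: the vector value and
// an output chain. Declare both result types up front so the DAG
// models the side effect of the custom VCIX instruction.
SDVTList VTs = DAG.getVTList({RetContainerVT, MVT::Other});
SDValue ScalableVector = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops);

// Convert the scalable container result back to the fixed-length type
// the caller asked for.
SDValue FixedVector =
    convertFromScalableVector(VT, ScalableVector, DAG, Subtarget);

// Forward the chain produced by the new node (result value 1), not the
// incoming chain Op.getOperand(0); returning the incoming chain would
// detach the side-effecting node from the chain, letting it be
// reordered or eliminated as dead.
return DAG.getMergeValues({FixedVector, ScalableVector.getValue(1)}, DL);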